1 #include <tgsi/tgsi_text.h>
/*
 * TGSI-text fragments used by the shared shader-source generator in
 * cairo-gpu-impl-programs.h (included below).  Each L_* macro is a piece of
 * Gallium TGSI assembly text; other L_* definitions from this group are not
 * visible in this chunk.
 */
#define L_TERMINATOR ""
#define L_TEXTURE "SAMP"
#define L_PROGRAM_ENV "CONST"
/* TEMP[0] holds the shader's result color before it is written to the output. */
#define L_RESULT_COLOR_TEMP "TEMP[0]"
/* Patch a register reference in-place so it targets the output register
 * instead of the temp ("TEMP" and " OUT" are both 4 bytes, no NUL copied). */
#define L_SET_RESULT_COLOR(p) memcpy(p, " OUT", 4)
#define L_TMP "TEMP[1]"
/* Declarations emitted when the corresponding temps are used.
 * NOTE(review): ", CONSTANT" on a TEMP declaration is unusual TGSI text —
 * confirm the tgsi_text parser accepts/ignores it. */
#define L_TEMP_RESULT_COLOR "DCL TEMP[0], CONSTANT"
#define L_TEMP_TMP "DCL TEMP[1], CONSTANT"
#define L_SCALAR_X ".xxxx"
#include "cairo-gpu-impl-programs.h"
/*
 * Create a CSO of the given kind (e.g. depth_stencil_alpha, rasterizer) from
 * the local variable `name`, caching it via the surrounding scope's `p`/`pp`
 * pointers under the space mutex.  Several interior lines of this macro
 * (the cached-lookup/store logic between lock and unlock, and the closing
 * `} while(0)`) are elided from this view — TODO confirm against the full
 * file before editing.
 */
#define CREATE_STATE(name) do { void* todel = 0; \
	p = ctx->pipe->create_##name##_state(ctx->pipe, &name); \
	CAIRO_MUTEX_LOCK(ctx->space->mutex); \
	CAIRO_MUTEX_UNLOCK(ctx->space->mutex); \
	if(todel) ctx->pipe->delete_##name##_state(ctx->pipe, p); \
/*
 * Like CREATE_STATE, but caches the created CSO in the space's hash table
 * under the surrounding scope's `hash` key.  If another thread raced and
 * stored an entry first (the `pe` check — its branch is on lines elided from
 * this view), the freshly created state is marked for deletion via `todel`.
 */
#define CREATE_STATE_HASH(name) do { void* pe; void* todel = 0; \
	p = ctx->pipe->create_##name##_state(ctx->pipe, &name); \
	CAIRO_MUTEX_LOCK(ctx->space->mutex); \
	pe = _cairo_gpu_space_lookup_ptr_unlocked(ctx->space, hash); \
	_cairo_gpu_space_store_ptr_unlocked(ctx->space, hash, p); \
	CAIRO_MUTEX_UNLOCK(ctx->space->mutex); \
	if(todel) ctx->pipe->delete_##name##_state(ctx->pipe, todel); \
51 _cairo_gpu_context_set_vert_param(cairo_gpu_context_t
* ctx
, unsigned i
, float* v
)
53 if(memcmp(&ctx
->vert_constants
[i
* 4], v
, 4 * sizeof(float)))
55 memcpy(&ctx
->vert_constants
[i
* 4], v
, 4 * sizeof(float));
56 ctx
->vert_constants_dirty
= 1;
61 _cairo_gpu_context_set_frag_param(cairo_gpu_context_t
* ctx
, unsigned i
, float* v
)
63 if(memcmp(&ctx
->frag_constants
[i
* 4], v
, 4 * sizeof(float)))
65 memcpy(&ctx
->frag_constants
[i
* 4], v
, 4 * sizeof(float));
66 ctx
->frag_constants_dirty
= 1;
/*
 * Initialize a context's cached-state fields to "invalid" so the first real
 * state set always reaches the hardware.  Several initialization lines are
 * elided from this view — TODO confirm what else is reset here.
 */
_cairo_gpu_context_init(cairo_gpu_context_t* ctx)
	/* -1 forces the first set_viewport call to program the viewport. */
	ctx->viewport_height = -1;
	ctx->viewport_width = -1;
	/* 0xff-fill makes every cached sampler hash invalid (no real hash is
	 * all-ones), so the first set_texture_and_attributes rebuilds them. */
	memset(ctx->samplers, 0xff, sizeof(ctx->samplers));
/* Forward declaration: creates the underlying Gallium pipe_context for a
 * space (defined later / elsewhere in this file). */
static struct pipe_context*
_cairo_gallium_space_create_context(cairo_gpu_space_t* space);
/*
 * Lazily attach a real pipe_context to this cairo context and install the
 * (all-disabled) depth/stencil/alpha state.  The declarations of the cache
 * pointers `pp` and `p` used by CREATE_STATE are on lines elided from this
 * view.
 */
_cairo_gpu_context_do_bind_space(cairo_gpu_context_t* ctx)
	struct pipe_depth_stencil_alpha_state depth_stencil_alpha;
	ctx->pipe = _cairo_gallium_space_create_context(ctx->space);
	/* If tracing is active (real_screen set), wrap the context so all calls
	 * are logged through the trace driver. */
	if(ctx->space->real_screen)
		ctx->pipe = trace_context_create(ctx->space->screen, ctx->pipe);
	/* Cache slot for the shared zsa CSO, used by CREATE_STATE below. */
	pp = &ctx->space->zsa;
	/* Zeroed state == depth test, stencil and alpha test all disabled. */
	memset(&depth_stencil_alpha, 0, sizeof(depth_stencil_alpha));
	CREATE_STATE(depth_stencil_alpha);
	ctx->pipe->bind_depth_stencil_alpha_state(ctx->pipe, p);
/*
 * Ensure the context is bound to its space.  Lines elided from this view
 * presumably guard against rebinding an already-bound context — TODO confirm.
 */
_cairo_gpu_context_bind_space(cairo_gpu_context_t* ctx)
	_cairo_gpu_context_do_bind_space(ctx);
/*
 * Bind the context for rendering to `surface`.  Only the space-binding step
 * is visible here; any per-surface work is on lines elided from this view.
 */
_cairo_gpu_context_bind(cairo_gpu_context_t* ctx, cairo_gpu_surface_t* surface)
	_cairo_gpu_context_bind_space(ctx);
/*
 * Point the pipe's framebuffer at `surface` (width x height), skipping the
 * driver call when nothing changed.  The loop clearing the remaining color
 * buffer slots and the fb width/height/nr_cbufs setup are partly on lines
 * elided from this view (as is `int i;`).
 */
_cairo_gallium_context_set_framebuffer(cairo_gpu_context_t* ctx, struct pipe_surface* surface, unsigned width, unsigned height)
	if(surface != ctx->surface || width != ctx->width || height != ctx->height)
		struct pipe_framebuffer_state fb;
		fb.cbufs[0] = surface;
		/* Clear the unused color-buffer slots (body elided from this view). */
		for(i = 1; i < PIPE_MAX_COLOR_BUFS; ++i)
		ctx->pipe->set_framebuffer_state(ctx->pipe, &fb);
		/* Remember the bound state so redundant calls are cheap.
		 * NOTE(review): ctx->width appears to be updated on an elided line. */
		ctx->surface = surface;
		ctx->height = height;
149 // note that attributes->matrix must be adjusted by the caller, if necessary!!
/*
 * Bind `texture` with the given cairo surface attributes to sampler unit
 * `idx`:
 *  - builds (or fetches from the space cache) a pipe_sampler_state matching
 *    extend/filter/extra,
 *  - binds the pipe_texture,
 *  - loads the texture-coordinate matrix rows into vertex-shader constants
 *    (zm supplies the third column, letting callers add a projective row).
 *
 * Note: attributes->matrix must be adjusted by the caller if necessary.
 * Many structural lines (braces, `break`s, `else`s, the `default:` arms) are
 * elided from this view; the switch bodies below rely on those elided breaks.
 */
_cairo_gpu_context_set_texture_and_attributes_(cairo_gpu_context_t* ctx, int idx, cairo_gpu_texture_t* texture, cairo_surface_attributes_t* attributes, float* zm)
	/* Sampler cache key: extend (2 bits) | extra (1 bit) | filter. */
	unsigned hash = TABLE_SAMPLER | attributes->extend | ((unsigned)attributes->extra << 2) | (attributes->filter << 3);
	if(hash != ctx->samplers[idx])
		void* p = _cairo_gpu_space_lookup_ptr(ctx->space, hash);
			struct pipe_sampler_state sampler;
			unsigned filter, wrap;
			memset(&sampler, 0, sizeof(sampler));
			/* Fall back to plain CLAMP when the driver cannot do the
			 * requested extend mode natively. */
			if(!(ctx->space->extend_mask & (1 << attributes->extend)))
				wrap = PIPE_TEX_WRAP_CLAMP;
			/* Map cairo extend modes to Gallium wrap modes (breaks elided). */
			switch (attributes->extend)
			case CAIRO_EXTEND_NONE:
				wrap = PIPE_TEX_WRAP_CLAMP_TO_BORDER;
			case CAIRO_EXTEND_PAD:
				wrap = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
			case CAIRO_EXTEND_REPEAT:
				wrap = PIPE_TEX_WRAP_REPEAT;
			case CAIRO_EXTEND_REFLECT:
				wrap = PIPE_TEX_WRAP_MIRROR_REPEAT;
			/* extra & 1 marks a 1D-style gradient texture: wrap only along s,
			 * clamp t to the single row (else-branch lines elided). */
			if((unsigned)attributes->extra & 1)
				sampler.wrap_s = wrap;
				sampler.wrap_t = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
				sampler.wrap_t = sampler.wrap_s = wrap;
			/* Map cairo filters to Gallium filters (breaks/else lines elided).
			 * BEST uses full anisotropy when available, GOOD caps it at 4x. */
			switch (attributes->filter)
			case CAIRO_FILTER_FAST:
			case CAIRO_FILTER_NEAREST:
				filter = PIPE_TEX_FILTER_NEAREST;
			case CAIRO_FILTER_BEST:
				if(ctx->space->max_anisotropy > 1.0)
					sampler.max_anisotropy = ctx->space->max_anisotropy;
					filter = PIPE_TEX_FILTER_ANISO;
					filter = PIPE_TEX_FILTER_LINEAR;
			case CAIRO_FILTER_GOOD:
				if(ctx->space->max_anisotropy > 1.0)
					sampler.max_anisotropy = MIN(4.0, ctx->space->max_anisotropy);
					filter = PIPE_TEX_FILTER_ANISO;
					filter = PIPE_TEX_FILTER_LINEAR;
			case CAIRO_FILTER_BILINEAR:
				filter = PIPE_TEX_FILTER_LINEAR;
			case CAIRO_FILTER_GAUSSIAN:
			sampler.min_img_filter = sampler.mag_img_filter = filter;
			// TODO: we would prefer unnormalized. Does it actually work in Gallium, also with REPEAT and REFLECT?
			sampler.normalized_coords = 1;
			CREATE_STATE_HASH(sampler);
		/* Rebind only if the CSO actually differs from the bound one. */
		if(p != ctx->ssamplers[idx])
			ctx->ssamplers[idx] = p;
			ctx->ssamplers_dirty = 1;
		ctx->samplers[idx] = hash;
	if(texture->texture != ctx->textures[idx])
		ctx->textures[idx] = texture->texture;
		ctx->textures_dirty = 1;
		/* Upload the user-space -> texture-space matrix as three vec4 rows:
		 * x' = (xx, yx, zm[0], 0), y' = (xy, yy, zm[1], 0),
		 * w  = (x0, y0, zm[2], 1). */
		cairo_matrix_t* matrix = &attributes->matrix;
		float xv[4] = {matrix->xx, matrix->yx, zm[0], 0};
		float yv[4] = {matrix->xy, matrix->yy, zm[1], 0};
		float wv[4] = {matrix->x0, matrix->y0, zm[2], 1};
		_cairo_gpu_context_set_vert_param(ctx, VERTENV_TEX_MATRIX_X(idx), xv);
		_cairo_gpu_context_set_vert_param(ctx, VERTENV_TEX_MATRIX_Y(idx), yv);
		_cairo_gpu_context_set_vert_param(ctx, VERTENV_TEX_MATRIX_W(idx), wv);
265 _cairo_gpu_context_set_texture_and_attributes(cairo_gpu_context_t
* ctx
, int idx
, cairo_gpu_texture_t
* texture
, cairo_surface_attributes_t
* attributes
)
267 _cairo_gpu_context_set_texture_and_attributes_(ctx
, idx
, texture
, attributes
, _cairo_gpu_vec4_zero
);
/* Forward declaration (return type on a line elided from this view):
 * creates the backing texture for a gpu surface. */
_cairo_gpu_surface_create_tex(cairo_gpu_context_t* ctx, cairo_gpu_surface_t* surface);
/*
 * Copy a width x height rectangle of pixels from `image_surface`
 * (src_x, src_y) into `texture` at (dst_x, dst_y) using a Gallium transfer
 * and a pixman SRC composite for format conversion.  The declaration of
 * `dstp` and the remaining pixman_image_composite arguments are on lines
 * elided from this view.
 */
_cairo_gallium_context_upload_pixels(cairo_gpu_context_t* ctx, cairo_gpu_texture_t* texture, cairo_image_surface_t* image_surface, int src_x, int src_y, int width, int height, int dst_x, int dst_y)
	struct pipe_transfer* transfer;
	pixman_image_t* dst_image;
	assert(texture->texture);
	/* If the GPU may still be reading/writing this texture, flush and wait
	 * on a fence before mapping it for CPU writes. */
	if(ctx->pipe->is_texture_referenced(ctx->pipe, texture->texture, 0, 0))
		struct pipe_fence_handle* fence;
		ctx->pipe->flush(ctx->pipe, PIPE_FLUSH_RENDER_CACHE, &fence);
		ctx->space->screen->fence_finish(ctx->space->screen, fence, 0);
		ctx->space->screen->fence_reference(ctx->space->screen, &fence, 0);
	/* Map only the destination rectangle (negative coords clamped to 0). */
	transfer = ctx->space->screen->get_tex_transfer(ctx->space->screen, texture->texture, 0, 0, 0, PIPE_TRANSFER_WRITE, dst_x >= 0 ? dst_x : 0, dst_y >= 0 ? dst_y : 0, width, height);
	dstp = ctx->space->screen->transfer_map(ctx->space->screen, transfer);
	/* Wrap the mapped memory as an a8r8g8b8 pixman image so pixman performs
	 * the format conversion during the composite. */
	dst_image = pixman_image_create_bits(PIXMAN_a8r8g8b8, width, height, (uint32_t*)dstp, transfer->stride);
	pixman_image_composite (PIXMAN_OP_SRC,
		image_surface->pixman_image,
	/* (remaining composite arguments elided from this view) */
	pixman_image_unref(dst_image);
	ctx->space->screen->transfer_unmap(ctx->space->screen, transfer);
	ctx->space->screen->tex_transfer_destroy(transfer);
317 #include <emmintrin.h>
321 #include <tmmintrin.h>
/*
 * Upload a width x height block of RGBA float data `p` into `texture`,
 * converting to the transfer's pixel format:
 *  - R32G32B32A32_FLOAT: straight memcpy (row-by-row when strides differ),
 *  - R16G16B16A16_FLOAT: per-component float -> half conversion,
 *  - A8R8G8B8_UNORM: SSE path (guarded by the <emmintrin.h>/<tmmintrin.h>
 *    includes above) converting 4 floats to packed bytes per pixel,
 *  - otherwise: scalar float -> byte conversion.
 * Declarations of `map`, `x`, `y`, `shuffle`, `s`, `c256me`, the braces and
 * the else/#ifdef structure are on lines elided from this view.
 */
_cairo_gpu_context_upload_data(cairo_gpu_context_t* ctx, int idx, cairo_gpu_texture_t* texture, float* p, int width, int height)
	struct pipe_transfer* transfer;
	// TODO: support fp32 textures
	transfer = ctx->space->screen->get_tex_transfer(ctx->space->screen, texture->texture, 0, 0, 0, PIPE_TRANSFER_WRITE, 0, 0, width, height);
	map = ctx->space->screen->transfer_map(ctx->space->screen, transfer);
	if(transfer->format == PIPE_FORMAT_R32G32B32A32_FLOAT)
		unsigned char* dstp = map;
		/* Contiguous rows: one big copy; otherwise copy per row and skip
		 * the stride padding. */
		if(transfer->stride == width * 4 * sizeof(float))
			memcpy(dstp, p, width * height * 4 * sizeof(float));
			for(y = height; y; --y)
				memcpy(dstp, p, width * 4 * sizeof(float));
				dstp += transfer->stride - width * 4 * sizeof(float);
	else if(transfer->format == PIPE_FORMAT_R16G16B16A16_FLOAT)
		unsigned short* dstp = map;
		for(y = 0; y < height; ++y)
			for(x = 0; x < width * 4; ++x)
				*dstp++ = _cairo_float_to_half(*p++);
			dstp = (unsigned short*)((char*)dstp + transfer->stride - width * 4 * sizeof(unsigned short));
	else if(transfer->format == PIPE_FORMAT_A8R8G8B8_UNORM)
		unsigned* dstp = (unsigned*)map;
		__m128* mp = (__m128*)p;
		unsigned char* shuffleb = (unsigned char*)&shuffle;
		/* Only the first 4 shuffle-control bytes are meaningful; zero the
		 * rest (their initialization is on lines elided from this view). */
		memset(shuffleb + 4, 0, 12);
		/* Scale factor just below 256 so 1.0f maps to 255, not 256. */
		c256me = _mm_set_ps1(nextafterf(256.0f, 0.0f));
		for(y = height; y; --y)
			// TODO: do 4 pixels at a time - beware of alignment issues
			for(x = width; x; --x)
				__m128i v = _mm_cvttps_epi32(_mm_mul_ps(*mp++, c256me));
				// TODO: is endianness correct? it should be.
				/* SSSE3 path packs the 4 dwords into one pixel via pshufb;
				 * the non-SSSE3 fallback ORs shifted copies together.
				 * NOTE(review): _mm_srli_si128 shifts by BYTES, yet the
				 * counts (24, 16+32, 8+64) look like bit counts, and the
				 * final store writes `v` rather than the accumulated `s` —
				 * confirm against the original file before relying on this
				 * path. */
				s = _mm_shuffle_epi8(v, shuffle);
				s = _mm_or_si128(s, _mm_srli_si128(v, 24));
				s = _mm_or_si128(s, _mm_srli_si128(v, 16 + 32));
				s = _mm_or_si128(s, _mm_srli_si128(v, 8 + 64));
				*dstp++ = _mm_cvtsi128_si32(v);
			dstp = (unsigned*)((char*)dstp + transfer->stride - width * 4 * sizeof(unsigned char));
		/* Generic scalar fallback for any other format. */
		unsigned char* dstp = map;
		for(y = height; y; --y)
			for(x = width * 4; x; --x)
				*dstp++ = _cairo_color_float_to_byte(*p++);
			dstp += transfer->stride - width * 4 * sizeof(unsigned char);
	ctx->space->screen->transfer_unmap(ctx->space->screen, transfer);
	ctx->space->screen->tex_transfer_destroy(transfer);
421 static __attribute__((unused
)) cairo_status_t
422 _cairo_gpu_context_blit_pixels(cairo_gpu_context_t
* ctx
, cairo_gpu_surface_t
* dst
, cairo_image_surface_t
* image_surface
, int src_x
, int src_y
, int width
, int height
, int dst_x
, int dst_y
)
424 _cairo_gallium_context_upload_pixels(ctx
, &dst
->texture
, image_surface
, src_x
, src_y
, width
, height
, dst_x
, dst_y
);
425 return CAIRO_STATUS_SUCCESS
;
/*
 * Backend entry point for uploading pixels into an arbitrary texture; `dst`
 * and `idx` are unused in this Gallium implementation (kept for interface
 * parity with other backends).  Lines between the call and the #error are
 * elided from this view; the #error belongs to an #if branch whose condition
 * is not visible here.
 */
_cairo_gpu_context_upload_pixels(cairo_gpu_context_t* ctx, cairo_gpu_surface_t* dst, int idx, cairo_gpu_texture_t* texture, cairo_image_surface_t* image_surface, int src_x, int src_y, int width, int height, int dst_x, int dst_y)
	_cairo_gallium_context_upload_pixels(ctx, texture, image_surface, src_x, src_y, width, height, dst_x, dst_y);
/* Guard: the IN/OUT register name tables below only cover 4 slots. */
#error Add more IN/OUT variables
/* TGSI register-name strings handed to the program builder; indexed by
 * input/output slot.  If more than 4 slots are ever needed the #error above
 * fires and these tables must be extended. */
static const char* _cairo_gpu_in[] = {"IN[0]", "IN[1]", "IN[2]", "IN[3]"};
static const char* _cairo_gpu_out[] = {"OUT[0]", "OUT[1]", "OUT[2]", "OUT[3]"};
/*
 * Bind the vertex shader described by the `vert` key, generating it on a
 * cache miss: TGSI text is built with the shared program builder, parsed
 * with tgsi_text_translate, turned into a pipe CSO and stored in the space
 * hash table.  The declarations of `i`, `j`, `ps`, the vs.tokens hookup and
 * several braces are on lines elided from this view.
 */
_cairo_gpu_context__set_vert(cairo_gpu_context_t* ctx, unsigned vert)
	if((int)vert != ctx->vert)
		unsigned hash = TABLE_VERT | vert;
		void* p = _cairo_gpu_space_lookup_ptr(ctx->space, hash);
			struct pipe_shader_state vs;
			struct tgsi_token tokens[1024];
			cairo_gpu_program_builder_t pb;
			cairo_gpu_program_builder_t* b = &pb;
			cairo_gpu_string_builder_t* builder = &b->body;
			memset(&pb, 0, sizeof(pb));
			/* TGSI supports 0/1 swizzle components; no DIV/DP2A emulation. */
			b->has_01_swizzles = 1;
			b->div_uses = b->dp2a_uses = 0;
			/* Declare inputs: position always, color only when an op uses it. */
			OUT("DCL IN[0], POSITION");
			b->in_position = _cairo_gpu_in[0];
			if(vert & (VERT_COLOR_PREOP | VERT_COLOR_POSTOP))
				OUT("DCL IN[1], COLOR");
				b->in_color = _cairo_gpu_in[1];
			/* Per-operand texcoord inputs (slot counter `i` is maintained on
			 * elided lines). */
			for(j = 0; j < MAX_OPERANDS; ++j)
				unsigned tex = (vert >> (VERT_TEX_SHIFT + j * VERT_TEX_BITS)) & VERT_TEX_MASK;
				if(tex && tex != VERT_TEX_GEN)
					OUTF("DCL IN[%i], GENERIC", i);
					b->in_texcoord[0] = b->in_texcoord[1] = _cairo_gpu_in[i];
			/* Declare outputs mirroring the inputs. */
			OUT("DCL OUT[0], POSITION");
			b->out_position = _cairo_gpu_out[0];
			if(vert & (VERT_COLOR_PREOP | VERT_COLOR_POSTOP))
				OUT("DCL OUT[1], COLOR");
				b->out_color = _cairo_gpu_out[1];
			for(j = 0; j < MAX_OPERANDS; ++j)
				unsigned tex = (vert >> (VERT_TEX_SHIFT + j * VERT_TEX_BITS)) & VERT_TEX_MASK;
				OUTF("DCL OUT[%i], GENERIC[%i]", i, j);
				b->out_texcoord[j] = _cairo_gpu_out[i];
			// TODO: only emit needed ones to shut up Gallium
			OUTF("DCL CONST[0..%i]", VERTENV_COUNT - 1);
			/* Immediates 0 and 1 used by the generated code. */
			OUT("IMM FLT32 {0.0, 0.0, 0.0, 0.0}");
			OUT("IMM FLT32 {1.0, 1.0, 1.0, 1.0}");
			_cairo_gpu_write_vert_position(b, vert);
			_cairo_gpu_write_vert(b, vert);
			ps = _cairo_gpu_program_builder_finish(b);
			tgsi_text_translate(ps, tokens, sizeof(tokens));
			CREATE_STATE_HASH(vs);
		ctx->pipe->bind_vs_state(ctx->pipe, p);
/*
 * Bind the fragment shader described by the `frag` key, generating and
 * caching it on a miss — the fragment-side twin of __set_vert.  The
 * declarations of `i`, `j`, `ps`, the fs.tokens hookup, sampler-presence
 * tests and several braces are on lines elided from this view.
 */
_cairo_gpu_context__set_frag(cairo_gpu_context_t* ctx, unsigned frag)
	if((int)frag != ctx->frag)
		unsigned hash = TABLE_FRAG | frag;
		void* p = _cairo_gpu_space_lookup_ptr(ctx->space, hash);
			struct pipe_shader_state fs;
			struct tgsi_token tokens[1024];
			cairo_gpu_program_builder_t pb;
			cairo_gpu_program_builder_t* b = &pb;
			cairo_gpu_string_builder_t* builder = &b->body;
			memset(&pb, 0, sizeof(pb));
			b->has_01_swizzles = 1;
			b->div_uses = b->dp2a_uses = 0;
			OUT("DCL IN[0], POSITION, LINEAR");
			b->in_position = _cairo_gpu_in[0];
			/* Interpolated primary color, only when the key requests it. */
			if(frag & FRAG_PRIMARY)
				OUT("DCL IN[1], COLOR, CONSTANT");
				b->in_color = _cairo_gpu_in[1];
			/* Per-operand texcoord inputs (`i` maintained on elided lines). */
			for(j = 0; j < MAX_OPERANDS; ++j)
				unsigned tex = frag >> (FRAG_TEX_SHIFT + j * FRAG_TEX_BITS);
				OUTF("DCL IN[%i], GENERIC[%i], LINEAR", i, j);
				b->in_texcoord[j] = _cairo_gpu_in[i];
			OUT("DCL OUT[0], COLOR, CONSTANT");
			b->out_color = _cairo_gpu_out[0];
			OUTF("DCL CONST[0..%i], CONSTANT", FRAGENV_COUNT - 1);
			// TODO: only declare used ones?
			/* One sampler declaration per texturing operand. */
			for(j = 0; j < MAX_OPERANDS; ++j)
				unsigned tex = (frag >> (FRAG_TEX_SHIFT + j * FRAG_TEX_BITS)) & FRAG_TEX_MASK;
				OUTF("DCL SAMP[%i], CONSTANT", j);
			// TODO: only emit needed ones to shut up Gallium
			OUT("IMM FLT32 {0.0, 0.0, 0.0, 0.0}");
			OUT("IMM FLT32 {1.0, 1.0, 1.0, 1.0}");
			_cairo_gpu_write_frag(b, frag);
			ps = _cairo_gpu_program_builder_finish(b);
			tgsi_text_translate(ps, tokens, sizeof(tokens));
			CREATE_STATE_HASH(fs);
		ctx->pipe->bind_fs_state(ctx->pipe, p);
613 _cairo_gpu_context_set_constant_color(cairo_gpu_context_t
* ctx
, cairo_gpu_color4_t
* color
)
615 _cairo_gpu_context_set_frag_param(ctx
, FRAGENV_CONSTANT
, &color
->c
.r
);
616 //printf("%f %f %f %f\n", color->c.r, color->c.g, color->c.b, color->ka);
620 _cairo_gpu_context_set_vert_frag(cairo_gpu_context_t
* ctx
, unsigned vert
, unsigned frag
)
622 _cairo_gpu_context__set_vert(ctx
, vert
);
623 _cairo_gpu_context__set_frag(ctx
, frag
);
626 static __attribute__((unused
)) void
627 _cairo_gpu_context_set_frag_only(cairo_gpu_context_t
* ctx
, int frag
)
629 _cairo_gpu_context__set_frag(ctx
, frag
);
/*
 * Record a new drawing translation (dx, dy); the combined viewport/translation
 * matrix constant is rebuilt lazily in __emit_state.  The stores of
 * ctx->dx/ctx->dy are presumably on lines elided from this view — TODO
 * confirm.
 */
_cairo_gpu_context_set_translation(cairo_gpu_context_t* ctx, int dx, int dy)
	if(dx != ctx->dx || dy != ctx->dy)
		ctx->matrix_dirty = 1;
/*
 * Program the pipe viewport so NDC [-1,1] maps onto the (x, y, width,
 * height) rectangle, skipping the driver call when unchanged.  The stores
 * of ctx->viewport_x/viewport_y and the closing braces are on lines elided
 * from this view.
 */
_cairo_gpu_context_set_viewport(cairo_gpu_context_t* ctx, unsigned x, unsigned y, unsigned width, unsigned height)
	if(ctx->viewport_width != (int)width || ctx->viewport_height != (int)height || ctx->viewport_x != (int)x || ctx->viewport_y != (int)y)
		float half_width = width / 2.0;
		float half_height = height / 2.0;
		struct pipe_viewport_state viewport;
		/* Scale NDC to pixels; z and w pass through unchanged. */
		viewport.scale[0] = half_width;
		viewport.scale[1] = half_height;
		viewport.scale[2] = 1.0;
		viewport.scale[3] = 1.0;
		/* Center of the viewport rectangle in window coordinates. */
		viewport.translate[0] = half_width + x;
		viewport.translate[1] = half_height + y;
		viewport.translate[2] = 1.0;
		viewport.translate[3] = 0.0;
		ctx->pipe->set_viewport_state(ctx->pipe, &viewport);
		/* The vertex-shader matrix constant depends on the viewport. */
		ctx->matrix_dirty = 1;
		ctx->viewport_width = width;
		ctx->viewport_height = height;
/*
 * Translation table from the backend's internal blend-factor enum to Gallium
 * PIPE_BLENDFACTOR_* values, indexed by cblend.src_rgb/dst_rgb/src_alpha/
 * dst_alpha in set_blend below.  NOTE(review): some entries (and the
 * opening/closing braces) are on lines elided from this view, so the visible
 * ordering is not the complete table — confirm indices against the enum
 * before editing.
 */
static unsigned _cairo_gpu_pipe_blendfactors[] =
	PIPE_BLENDFACTOR_ZERO,
	PIPE_BLENDFACTOR_ONE,
	PIPE_BLENDFACTOR_SRC_COLOR,
	PIPE_BLENDFACTOR_INV_SRC_COLOR,
	PIPE_BLENDFACTOR_SRC_ALPHA,
	PIPE_BLENDFACTOR_INV_SRC_ALPHA,
	PIPE_BLENDFACTOR_DST_ALPHA,
	PIPE_BLENDFACTOR_INV_DST_ALPHA,
	PIPE_BLENDFACTOR_DST_COLOR,
	PIPE_BLENDFACTOR_INV_DST_COLOR,
	PIPE_BLENDFACTOR_SRC_ALPHA_SATURATE,
	PIPE_BLENDFACTOR_CONST_COLOR,
	PIPE_BLENDFACTOR_INV_CONST_COLOR,
	PIPE_BLENDFACTOR_CONST_ALPHA,
	PIPE_BLENDFACTOR_INV_CONST_ALPHA,
/*
 * Bind the blend CSO described by the `blendv` key, building and caching it
 * on a miss.  `cblend` is decoded from blendv on lines elided from this
 * view, as is the blend struct's initial memset and the final cache of
 * ctx->blend.
 */
_cairo_gpu_context_set_blend(cairo_gpu_context_t* ctx, unsigned blendv)
	if((int)blendv != ctx->blend)
		unsigned hash = TABLE_BLEND | blendv;
		void* p = _cairo_gpu_space_lookup_ptr(ctx->space, hash);
			cairo_gpu_blend_t cblend;
			struct pipe_blend_state blend;
			// XXX: fix endianness??
			/* Plain source-over-nothing (func == SOURCE, ADD) needs no
			 * blending at all. */
			blend.blend_enable = !(cblend.func == BLEND_FUNC_SOURCE && !cblend.eq);
			/* eq selects ADD (0) vs REVERSE_SUBTRACT (1) arithmetically. */
			blend.rgb_func = cblend.eq * (PIPE_BLEND_REVERSE_SUBTRACT - PIPE_BLEND_ADD) + PIPE_BLEND_ADD;
			blend.rgb_src_factor = _cairo_gpu_pipe_blendfactors[cblend.src_rgb];
			blend.rgb_dst_factor = _cairo_gpu_pipe_blendfactors[cblend.dst_rgb];
			blend.alpha_func = cblend.eq * (PIPE_BLEND_REVERSE_SUBTRACT - PIPE_BLEND_ADD) + PIPE_BLEND_ADD;
			blend.alpha_src_factor = _cairo_gpu_pipe_blendfactors[cblend.src_alpha];
			blend.alpha_dst_factor = _cairo_gpu_pipe_blendfactors[cblend.dst_alpha];
			blend.logicop_enable = 0;
			blend.logicop_func = 0;
			blend.colormask = cblend.color_mask;
			CREATE_STATE_HASH(blend);
		ctx->pipe->bind_blend_state(ctx->pipe, p);
735 static inline void _cairo_gpu_context_set_blend_color(cairo_gpu_context_t
* ctx
, cairo_gpu_color4_t
* blend_color
)
737 ctx
->pipe
->set_blend_color(ctx
->pipe
, (struct pipe_blend_color
*)blend_color
);
/*
 * Bind the rasterizer CSO for the given smoothing flag (polygon
 * antialiasing on/off), creating and caching it per-space on first use.
 * The declarations of `pp`/`p` used by CREATE_STATE and several braces are
 * on lines elided from this view.
 */
static inline void _cairo_gpu_context_set_raster(cairo_gpu_context_t* ctx, unsigned smooth)
	if((int)smooth != ctx->smooth)
		/* One cached rasterizer CSO per smooth setting. */
		pp = &ctx->space->rasterizer[smooth];
			struct pipe_rasterizer_state rasterizer;
			memset(&rasterizer, 0, sizeof(rasterizer));
			/* Follow OpenGL pixel-center/fill conventions. */
			rasterizer.gl_rasterization_rules = 1;
			// TODO rasterizer.multisample;
			rasterizer.poly_smooth = !!smooth;
			CREATE_STATE(rasterizer);
		ctx->pipe->bind_rasterizer_state(ctx->pipe, p);
		ctx->smooth = smooth;
/*
 * Fill a rectangle of `surface` with an RGBA color using the screen's 2D
 * surface_fill hook; works without the surface being bound as the current
 * framebuffer.  The packed-color union `c` (byte fields overlaying a 32-bit
 * `value`) is declared on lines elided from this view; the two member lists
 * below are its little-endian and big-endian layouts.  Note the byte fields
 * intentionally shadow the float parameters inside the struct declaration.
 */
_cairo_gpu_context_fill_rect_unbound(cairo_gpu_context_t* ctx, cairo_gpu_surface_t* surface, int x, int y, int width, int height, float r, float g, float b, float a)
#ifndef WORDS_BIGENDIAN
	unsigned char b, g, r, a;
	unsigned char a, r, g, b;
	/* Convert each channel to an 8-bit value in the packed pixel. */
	c.a = _cairo_color_float_to_byte(a);
	c.r = _cairo_color_float_to_byte(r);
	c.g = _cairo_color_float_to_byte(g);
	c.b = _cairo_color_float_to_byte(b);
	/* The packed layout above is only valid for ARGB32 surfaces. */
	assert(surface->texture.texture->format == PIPE_FORMAT_A8R8G8B8_UNORM);
	ctx->pipe->surface_fill(ctx->pipe, surface->texture.surface, x, y, width, height, c.value);
/* Rect fills never need the destination bound as the current framebuffer in
 * this backend, so the bound variant aliases the unbound one. */
#define _cairo_gpu_context_fill_rect _cairo_gpu_context_fill_rect_unbound
// surface must be the current destination (bound framebuffer)
797 _cairo_gpu_context_fill(cairo_gpu_context_t
* ctx
, cairo_gpu_surface_t
* surface
, float r
, float g
, float b
, float a
)
799 // ignores color mask, but this is probably faster than using a quad with color mask
800 float color
[4] = {r
, g
, b
, a
};
801 ctx
->pipe
->clear(ctx
->pipe
, PIPE_CLEAR_COLOR
, color
, 0.0, 0);
805 _cairo_gpu_context_fill_unbound(cairo_gpu_context_t
* ctx
, cairo_gpu_surface_t
* surface
, float r
, float g
, float b
, float a
)
807 _cairo_gpu_context_fill_rect_unbound(ctx
, surface
, 0, 0, surface
->width
, surface
->height
, r
, g
, b
, a
);
811 _cairo_gpu_context_set_geometry(cairo_gpu_context_t
* ctx
, cairo_gpu_geometry_t
* geometry
)
813 _cairo_gpu_context_geometry_bind(ctx
, geometry
);
/*
 * Flush all lazily-accumulated state to the pipe driver before a draw:
 * sampler textures, sampler CSOs, the viewport/translation matrix constant,
 * and the vertex/fragment constant buffers.  The declaration of `ntex` and
 * the loop-break structure (the loops count down to find the highest
 * texture unit the bound fragment program actually uses) are partly on
 * lines elided from this view.
 */
_cairo_gpu_context__emit_state(cairo_gpu_context_t* ctx)
	if(ctx->textures_dirty)
		/* Find the highest sampler unit referenced by ctx->frag. */
		for(ntex = MAX_OPERANDS; ntex > 0; --ntex)
			if(ctx->frag & (FRAG_TEX_COLOR_MASK << (FRAG_TEX_SHIFT + (ntex - 1) * FRAG_TEX_BITS)))
		ctx->pipe->set_sampler_textures(ctx->pipe, ntex, ctx->textures);
		ctx->textures_dirty = 0;
	if(ctx->ssamplers_dirty)
		for(ntex = MAX_OPERANDS; ntex > 0; --ntex)
			if(ctx->frag & (FRAG_TEX_COLOR_MASK << (FRAG_TEX_SHIFT + (ntex - 1) * FRAG_TEX_BITS)))
		ctx->pipe->bind_sampler_states(ctx->pipe, ntex, ctx->ssamplers);
		ctx->ssamplers_dirty = 0;
	if(ctx->matrix_dirty)
		/* Pixel -> NDC transform folded with the (dx, dy) translation:
		 * scale by 2/viewport size, offset so pixel (0,0) + translation
		 * lands at NDC -1. */
		float v[4] = {2.0f / ctx->viewport_width, 2.0f / ctx->viewport_height, 2.0f * (ctx->dx - ctx->viewport_x) / ctx->viewport_width - 1.0f, 2.0f * (ctx->dy - ctx->viewport_y) / ctx->viewport_height - 1.0f};
		_cairo_gpu_context_set_vert_param(ctx, VERTENV_MATRIX, v);
		ctx->matrix_dirty = 0;
	// create a new buffer each time like Mesa state tracker, to avoid render races
	if(ctx->vert_constants_dirty)
		pipe_buffer_reference(&ctx->vert_cbuffer.buffer, NULL);
		ctx->vert_cbuffer.buffer = ctx->space->screen->buffer_create(ctx->space->screen, 16, PIPE_BUFFER_USAGE_CONSTANT, sizeof(ctx->vert_constants));
		pipe_buffer_write(ctx->space->screen, ctx->vert_cbuffer.buffer, 0, sizeof(ctx->vert_constants), ctx->vert_constants);
		ctx->pipe->set_constant_buffer(ctx->pipe, PIPE_SHADER_VERTEX, 0, &ctx->vert_cbuffer);
		ctx->vert_constants_dirty = 0;
	if(ctx->frag_constants_dirty)
		pipe_buffer_reference(&ctx->frag_cbuffer.buffer, NULL);
		ctx->frag_cbuffer.buffer = ctx->space->screen->buffer_create(ctx->space->screen, 16, PIPE_BUFFER_USAGE_CONSTANT, sizeof(ctx->frag_constants));
		pipe_buffer_write(ctx->space->screen, ctx->frag_cbuffer.buffer, 0, sizeof(ctx->frag_constants), ctx->frag_constants);
		ctx->pipe->set_constant_buffer(ctx->pipe, PIPE_SHADER_FRAGMENT, 0, &ctx->frag_cbuffer);
		ctx->frag_constants_dirty = 0;
874 _cairo_gpu_context_draw(cairo_gpu_context_t
* ctx
)
876 _cairo_gpu_context__emit_state(ctx
);
877 ctx
->pipe
->draw_arrays(ctx
->pipe
, ctx
->mode
, 0, ctx
->count
);
881 _cairo_gpu_context_blit_1to1(cairo_gpu_context_t
* ctx
, cairo_gpu_surface_t
* dst
, int dx
, int dy
, int sx
, int sy
, int w
, int h
)
883 // surfaces must be the same format. Currently, this always hold.
884 ctx
->pipe
->surface_copy(ctx
->pipe
, dst
->texture
.surface
, dx
, dy
, ctx
->read_surface
, sx
, sy
, w
, h
);
888 _cairo_gpu_context_blit_same(cairo_gpu_context_t
* ctx
, cairo_gpu_surface_t
* dst
, int x
, int y
, int w
, int h
)
890 _cairo_gpu_context_blit_1to1(ctx
, dst
, x
, y
, x
, y
, w
, h
);
/* Forward declaration (return type on a line elided from this view):
 * scaled blit of a texture region into dst with integer zoom factors. */
_cairo_gpu_blit_image(cairo_gpu_context_t* ctx, cairo_gpu_surface_t* dst, cairo_gpu_texture_t* texture, int dst_x, int dst_y, int src_x, int src_y, int width, int height, int zoom_x, int zoom_y);
/*
 * Blit a sw x sh region from the read surface/texture into dst at (dx, dy),
 * scaled by integer factors (zx, zy).  The 1:1 fast path uses surface_copy;
 * otherwise a textured draw via _cairo_gpu_blit_image.  The `else` between
 * the two paths is on lines elided from this view.
 */
_cairo_gpu_context_blit_zoom(cairo_gpu_context_t* ctx, cairo_gpu_surface_t* dst, int dx, int dy, int sx, int sy, int sw, int sh, int zx, int zy)
	if(zx == 1 && zy == 1)
		_cairo_gpu_context_blit_1to1(ctx, dst, dx, dy, sx, sy, sw, sh);
		/* Scaled path requires a readable texture for the source. */
		assert(ctx->read_texture);
		_cairo_gpu_blit_image(ctx, dst, ctx->read_texture, dx, dy, sx, sy, sw, sh, zx, zy);
/* Rect drawing is delegated to the shared emulation layer included below. */
#define _cairo_gpu_context_draw_rect _cairo_gpu_emulate_draw_rect
#include "cairo-gpu-impl-context-emulate.h"
/*
 * Copy a w x h region of the current read surface at (sx, sy) into dst's
 * texture surface at (dx, dy).  The function's closing lines run past the
 * end of this view.
 */
_cairo_gpu_context_read_to_texture(cairo_gpu_context_t* ctx, cairo_gpu_surface_t* dst, int dx, int dy, int sx, int sy, int w, int h)
	ctx->pipe->surface_copy(ctx->pipe, dst->texture.surface, dx, dy, ctx->read_surface, sx, sy, w, h);