/**************************************************************************
 *
 * Copyright 2009 VMware, Inc.
 * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/**
 * @file
 * Code generate the whole fragment pipeline.
 *
 * The fragment pipeline consists of the following stages:
 * - triangle edge in/out testing
 * - scissor test
 * - early depth test
 * - fragment shader
 * - alpha test
 * - depth/stencil test (stencil TBI)
 * - blending
 *
 * This file has only the glue to assemble the fragment pipeline.  The actual
 * plumbing of converting Gallium state into LLVM IR is done elsewhere, in the
 * lp_bld_*.[ch] files, in a completely generic and reusable way.  Here we
 * muster the LLVM JIT execution engine to create a function that follows an
 * established binary interface and that can be called from C directly.
 *
 * A big source of complexity here is that we often want to run different
 * stages with different precisions and data types.  For example, the
 * fragment shader typically needs to be done in floats, but the
 * depth/stencil test and blending are better done in the type that most
 * closely matches the depth/stencil and color buffer respectively.
 *
 * Since the width of a SIMD vector register stays the same regardless of the
 * element type, different types imply a different number of elements, so we
 * must code generate more instances of the stages with larger types to be
 * able to feed/consume the stages with smaller types.
 *
 * @author Jose Fonseca <jfonseca@vmware.com>
 */
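
/*
 * A concrete illustration of the type mismatch described above (a sketch,
 * assuming 128-bit SIMD registers and an 8-bit unorm color buffer): the
 * fragment shader produces one <4 x float> vector per quad, while blending
 * consumes one <16 x i8> vector covering all 16 pixels of a block, so four
 * shader-stage vectors are packed into a single blending-stage vector:
 *
 *    4 x <4 x float>  --lp_build_conv(builder, fs_type, blend_type,
 *                                     src, 4, dst, 1)-->  1 x <16 x i8>
 */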
#include "pipe/p_defines.h"
#include "util/u_inlines.h"
#include "util/u_memory.h"
#include "util/u_format.h"
#include "util/u_dump.h"
#include "os/os_time.h"
#include "pipe/p_shader_tokens.h"
#include "draw/draw_context.h"
#include "tgsi/tgsi_dump.h"
#include "tgsi/tgsi_scan.h"
#include "tgsi/tgsi_parse.h"
#include "gallivm/lp_bld_type.h"
#include "gallivm/lp_bld_const.h"
#include "gallivm/lp_bld_conv.h"
#include "gallivm/lp_bld_intr.h"
#include "gallivm/lp_bld_logic.h"
#include "gallivm/lp_bld_depth.h"
#include "gallivm/lp_bld_interp.h"
#include "gallivm/lp_bld_tgsi.h"
#include "gallivm/lp_bld_alpha.h"
#include "gallivm/lp_bld_blend.h"
#include "gallivm/lp_bld_swizzle.h"
#include "gallivm/lp_bld_flow.h"
#include "gallivm/lp_bld_debug.h"
#include "lp_buffer.h"
#include "lp_context.h"
#include "lp_debug.h"
#include "lp_perf.h"
#include "lp_screen.h"
#include "lp_setup.h"
#include "lp_state.h"
#include "lp_tex_sample.h"

#include <llvm-c/Analysis.h>
static const unsigned char quad_offset_x[4] = {0, 1, 0, 1};
static const unsigned char quad_offset_y[4] = {0, 0, 1, 1};
/**
 * Derive from the quad's upper left scalar coordinates the coordinates for
 * all other quad pixels
 */
static void
generate_pos0(LLVMBuilderRef builder,
              LLVMValueRef x,
              LLVMValueRef y,
              LLVMValueRef *x0,
              LLVMValueRef *y0)
{
   LLVMTypeRef int_elem_type = LLVMInt32Type();
   LLVMTypeRef int_vec_type = LLVMVectorType(int_elem_type, QUAD_SIZE);
   LLVMTypeRef elem_type = LLVMFloatType();
   LLVMTypeRef vec_type = LLVMVectorType(elem_type, QUAD_SIZE);
   LLVMValueRef x_offsets[QUAD_SIZE];
   LLVMValueRef y_offsets[QUAD_SIZE];
   unsigned i;

   x = lp_build_broadcast(builder, int_vec_type, x);
   y = lp_build_broadcast(builder, int_vec_type, y);

   for(i = 0; i < QUAD_SIZE; ++i) {
      x_offsets[i] = LLVMConstInt(int_elem_type, quad_offset_x[i], 0);
      y_offsets[i] = LLVMConstInt(int_elem_type, quad_offset_y[i], 0);
   }

   x = LLVMBuildAdd(builder, x, LLVMConstVector(x_offsets, QUAD_SIZE), "");
   y = LLVMBuildAdd(builder, y, LLVMConstVector(y_offsets, QUAD_SIZE), "");

   *x0 = LLVMBuildSIToFP(builder, x, vec_type, "");
   *y0 = LLVMBuildSIToFP(builder, y, vec_type, "");
}
/**
 * Generate the depth test.
 */
static void
generate_depth(LLVMBuilderRef builder,
               const struct lp_fragment_shader_variant_key *key,
               struct lp_type src_type,
               struct lp_build_mask_context *mask,
               LLVMValueRef src,
               LLVMValueRef dst_ptr)
{
   const struct util_format_description *format_desc;
   struct lp_type dst_type;

   if(!key->depth.enabled)
      return;

   format_desc = util_format_description(key->zsbuf_format);
   assert(format_desc);

   /*
    * Depths are expected to be between 0 and 1, even if they are stored in
    * floats. Setting these bits here will ensure that the lp_build_conv()
    * call below won't try to unnecessarily clamp the incoming values.
    */
   if(src_type.floating) {
      src_type.sign = FALSE;
      src_type.norm = TRUE;
   }
   else {
      assert(!src_type.sign);
      assert(src_type.norm);
   }

   /* Pick the depth type. */
   dst_type = lp_depth_type(format_desc, src_type.width*src_type.length);

   /* FIXME: Cope with a depth test type with a different bit width. */
   assert(dst_type.width == src_type.width);
   assert(dst_type.length == src_type.length);

   lp_build_conv(builder, src_type, dst_type, &src, 1, &src, 1);

   dst_ptr = LLVMBuildBitCast(builder,
                              dst_ptr,
                              LLVMPointerType(lp_build_vec_type(dst_type), 0), "");

   lp_build_depth_test(builder,
                       &key->depth,
                       dst_type,
                       format_desc,
                       mask,
                       src,
                       dst_ptr);
}
/**
 * Generate the code to do inside/outside triangle testing for the
 * four pixels in a 2x2 quad.  This will set the four elements of the
 * quad mask vector to 0 or ~0.
 * \param i  which quad of the quad group to test, in [0,3]
 */
static void
generate_tri_edge_mask(LLVMBuilderRef builder,
                       unsigned i,
                       LLVMValueRef *mask,      /* ivec4, out */
                       LLVMValueRef c0,         /* int32 */
                       LLVMValueRef c1,         /* int32 */
                       LLVMValueRef c2,         /* int32 */
                       LLVMValueRef step0_ptr,  /* ivec4 */
                       LLVMValueRef step1_ptr,  /* ivec4 */
                       LLVMValueRef step2_ptr)  /* ivec4 */
{
#define OPTIMIZE_IN_OUT_TEST 0
#if OPTIMIZE_IN_OUT_TEST
   struct lp_build_if_state ifctx;
   LLVMValueRef not_draw_all;
#endif
   struct lp_build_flow_context *flow;
   struct lp_type i32_type;
   LLVMTypeRef i32vec4_type, mask_type;
   LLVMValueRef c0_vec, c1_vec, c2_vec;
   LLVMValueRef in_out_mask;

   assert(i < 4);

   /* int32 vector type */
   memset(&i32_type, 0, sizeof i32_type);
   i32_type.floating = FALSE; /* values are integers */
   i32_type.sign = TRUE;      /* values are signed */
   i32_type.norm = FALSE;     /* values are not normalized */
   i32_type.width = 32;       /* 32-bit int values */
   i32_type.length = 4;       /* 4 elements per vector */

   i32vec4_type = lp_build_int32_vec4_type();

   mask_type = LLVMIntType(32 * 4);

   /*
    * Use a conditional here to do detailed pixel in/out testing.
    * We only have to do this if c0 != INT_MIN.
    */
   flow = lp_build_flow_create(builder);
   lp_build_flow_scope_begin(flow);

   {
#if OPTIMIZE_IN_OUT_TEST
      /* not_draw_all = (c0 != INT_MIN) */
      not_draw_all = LLVMBuildICmp(builder,
                                   LLVMIntNE,
                                   c0,
                                   LLVMConstInt(LLVMInt32Type(), INT_MIN, 0),
                                   "");

      in_out_mask = lp_build_const_int_vec(i32_type, ~0);

      lp_build_flow_scope_declare(flow, &in_out_mask);

      /* if (not_draw_all) {... */
      lp_build_if(&ifctx, flow, builder, not_draw_all);
#endif
      {
         LLVMValueRef step0_vec, step1_vec, step2_vec;
         LLVMValueRef m0_vec, m1_vec, m2_vec;
         LLVMValueRef index, m;

         /*
          * c0_vec = {c0, c0, c0, c0}
          * Note that we emit this code four times but LLVM optimizes away
          * three instances of it.
          */
         c0_vec = lp_build_broadcast(builder, i32vec4_type, c0);
         c1_vec = lp_build_broadcast(builder, i32vec4_type, c1);
         c2_vec = lp_build_broadcast(builder, i32vec4_type, c2);
         lp_build_name(c0_vec, "edgeconst0vec");
         lp_build_name(c1_vec, "edgeconst1vec");
         lp_build_name(c2_vec, "edgeconst2vec");

         /* load step0vec, step1vec, step2vec from memory */
         index = LLVMConstInt(LLVMInt32Type(), i, 0);
         step0_vec = LLVMBuildLoad(builder, LLVMBuildGEP(builder, step0_ptr, &index, 1, ""), "");
         step1_vec = LLVMBuildLoad(builder, LLVMBuildGEP(builder, step1_ptr, &index, 1, ""), "");
         step2_vec = LLVMBuildLoad(builder, LLVMBuildGEP(builder, step2_ptr, &index, 1, ""), "");
         lp_build_name(step0_vec, "step0vec");
         lp_build_name(step1_vec, "step1vec");
         lp_build_name(step2_vec, "step2vec");

         /* m0_vec = step0_ptr[i] > c0_vec */
         m0_vec = lp_build_compare(builder, i32_type, PIPE_FUNC_GREATER, step0_vec, c0_vec);
         m1_vec = lp_build_compare(builder, i32_type, PIPE_FUNC_GREATER, step1_vec, c1_vec);
         m2_vec = lp_build_compare(builder, i32_type, PIPE_FUNC_GREATER, step2_vec, c2_vec);

         /* in_out_mask = m0_vec & m1_vec & m2_vec */
         m = LLVMBuildAnd(builder, m0_vec, m1_vec, "");
         in_out_mask = LLVMBuildAnd(builder, m, m2_vec, "");
         lp_build_name(in_out_mask, "inoutmaskvec");
      }
#if OPTIMIZE_IN_OUT_TEST
      lp_build_endif(&ifctx);
#endif
   }
   lp_build_flow_scope_end(flow);
   lp_build_flow_destroy(flow);

   /* This is the initial alive/dead pixel mask for a quad of four pixels.
    * It's an int[4] vector with each word set to 0 or ~0.
    * Words will get cleared when pixels fail the Z test, etc.
    */
   *mask = in_out_mask;
}
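
/*
 * In scalar terms the test above computes, for pixel j of quad i (a sketch
 * of the generated code, not code that runs here):
 *
 *    mask[j] = (step0[i*4+j] > c0 &&
 *               step1[i*4+j] > c1 &&
 *               step2[i*4+j] > c2) ? ~0 : 0;
 */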
/**
 * Generate the scissor test: compare the fragment's position against the
 * current scissor rectangle and return a per-pixel i32 mask.
 */
static LLVMValueRef
generate_scissor_test(LLVMBuilderRef builder,
                      LLVMValueRef context_ptr,
                      const struct lp_build_interp_soa_context *interp,
                      struct lp_type type)
{
   LLVMTypeRef vec_type = lp_build_vec_type(type);
   LLVMValueRef xpos = interp->pos[0], ypos = interp->pos[1];
   LLVMValueRef xmin, ymin, xmax, ymax;
   LLVMValueRef m0, m1, m2, m3, m;

   /* xpos, ypos contain the window coords for the four pixels in the quad */
   assert(xpos);
   assert(ypos);

   /* get the current scissor bounds, convert to vectors */
   xmin = lp_jit_context_scissor_xmin_value(builder, context_ptr);
   xmin = lp_build_broadcast(builder, vec_type, xmin);

   ymin = lp_jit_context_scissor_ymin_value(builder, context_ptr);
   ymin = lp_build_broadcast(builder, vec_type, ymin);

   xmax = lp_jit_context_scissor_xmax_value(builder, context_ptr);
   xmax = lp_build_broadcast(builder, vec_type, xmax);

   ymax = lp_jit_context_scissor_ymax_value(builder, context_ptr);
   ymax = lp_build_broadcast(builder, vec_type, ymax);

   /* compare the fragment's position coordinates against the scissor bounds */
   m0 = lp_build_compare(builder, type, PIPE_FUNC_GEQUAL, xpos, xmin);
   m1 = lp_build_compare(builder, type, PIPE_FUNC_GEQUAL, ypos, ymin);
   m2 = lp_build_compare(builder, type, PIPE_FUNC_LESS, xpos, xmax);
   m3 = lp_build_compare(builder, type, PIPE_FUNC_LESS, ypos, ymax);

   /* AND all the masks together */
   m = LLVMBuildAnd(builder, m0, m1, "");
   m = LLVMBuildAnd(builder, m, m2, "");
   m = LLVMBuildAnd(builder, m, m3, "");

   lp_build_name(m, "scissormask");

   return m;
}
static LLVMValueRef
build_int32_vec_const(int value)
{
   struct lp_type i32_type;

   memset(&i32_type, 0, sizeof i32_type);
   i32_type.floating = FALSE; /* values are integers */
   i32_type.sign = TRUE;      /* values are signed */
   i32_type.norm = FALSE;     /* values are not normalized */
   i32_type.width = 32;       /* 32-bit int values */
   i32_type.length = 4;       /* 4 elements per vector */
   return lp_build_const_int_vec(i32_type, value);
}
/**
 * Generate the fragment shader, depth/stencil test, and alpha tests.
 * \param i  which quad in the tile, in range [0,3]
 * \param do_tri_test  if 1, do triangle edge in/out testing
 */
static void
generate_fs(struct llvmpipe_context *lp,
            struct lp_fragment_shader *shader,
            const struct lp_fragment_shader_variant_key *key,
            LLVMBuilderRef builder,
            struct lp_type type,
            LLVMValueRef context_ptr,
            unsigned i,
            const struct lp_build_interp_soa_context *interp,
            struct lp_build_sampler_soa *sampler,
            LLVMValueRef *pmask,
            LLVMValueRef (*color)[4],
            LLVMValueRef depth_ptr,
            unsigned do_tri_test,
            LLVMValueRef c0,
            LLVMValueRef c1,
            LLVMValueRef c2,
            LLVMValueRef step0_ptr,
            LLVMValueRef step1_ptr,
            LLVMValueRef step2_ptr)
{
   const struct tgsi_token *tokens = shader->base.tokens;
   LLVMTypeRef elem_type;
   LLVMTypeRef vec_type;
   LLVMTypeRef int_vec_type;
   LLVMValueRef consts_ptr;
   LLVMValueRef outputs[PIPE_MAX_SHADER_OUTPUTS][NUM_CHANNELS];
   LLVMValueRef z = interp->pos[2];
   struct lp_build_flow_context *flow;
   struct lp_build_mask_context mask;
   boolean early_depth_test;
   unsigned attrib;
   unsigned chan;
   unsigned cbuf;

   assert(i < 4);

   elem_type = lp_build_elem_type(type);
   vec_type = lp_build_vec_type(type);
   int_vec_type = lp_build_int_vec_type(type);

   consts_ptr = lp_jit_context_constants(builder, context_ptr);

   flow = lp_build_flow_create(builder);

   memset(outputs, 0, sizeof outputs);

   lp_build_flow_scope_begin(flow);

   /* Declare the color and z variables */
   for(cbuf = 0; cbuf < key->nr_cbufs; cbuf++) {
      for(chan = 0; chan < NUM_CHANNELS; ++chan) {
         color[cbuf][chan] = LLVMGetUndef(vec_type);
         lp_build_flow_scope_declare(flow, &color[cbuf][chan]);
      }
   }
   lp_build_flow_scope_declare(flow, &z);

   /* do triangle edge testing */
   if (do_tri_test) {
      generate_tri_edge_mask(builder, i, pmask,
                             c0, c1, c2, step0_ptr, step1_ptr, step2_ptr);
   }
   else {
      *pmask = build_int32_vec_const(~0);
   }

   /* 'mask' will control execution based on quad's pixel alive/killed state */
   lp_build_mask_begin(&mask, flow, type, *pmask);

   if (key->scissor) {
      LLVMValueRef smask =
         generate_scissor_test(builder, context_ptr, interp, type);
      lp_build_mask_update(&mask, smask);
   }

   /* The depth test may run before the shader only when the shader cannot
    * kill fragments or replace Z.
    */
   early_depth_test =
      key->depth.enabled &&
      !key->alpha.enabled &&
      !shader->info.uses_kill &&
      !shader->info.writes_z;

   if(early_depth_test)
      generate_depth(builder, key,
                     type, &mask,
                     z, depth_ptr);

   lp_build_tgsi_soa(builder, tokens, type, &mask,
                     consts_ptr, interp->pos, interp->inputs,
                     outputs, sampler);

   for (attrib = 0; attrib < shader->info.num_outputs; ++attrib) {
      for(chan = 0; chan < NUM_CHANNELS; ++chan) {
         if(outputs[attrib][chan]) {
            LLVMValueRef out = LLVMBuildLoad(builder, outputs[attrib][chan], "");
            lp_build_name(out, "output%u.%u.%c", i, attrib, "xyzw"[chan]);

            switch (shader->info.output_semantic_name[attrib]) {
            case TGSI_SEMANTIC_COLOR:
               {
                  unsigned cbuf = shader->info.output_semantic_index[attrib];

                  lp_build_name(out, "color%u.%u.%c", i, attrib, "rgba"[chan]);

                  /* Alpha test */
                  /* XXX: should the alpha reference value be passed separately? */
                  /* XXX: should only test the final assignment to alpha */
                  if(cbuf == 0 && chan == 3) {
                     LLVMValueRef alpha = out;
                     LLVMValueRef alpha_ref_value;
                     alpha_ref_value = lp_jit_context_alpha_ref_value(builder, context_ptr);
                     alpha_ref_value = lp_build_broadcast(builder, vec_type, alpha_ref_value);
                     lp_build_alpha_test(builder, &key->alpha, type,
                                         &mask, alpha, alpha_ref_value);
                  }

                  color[cbuf][chan] = out;
                  break;
               }

            case TGSI_SEMANTIC_POSITION:
               if(chan == 2)
                  z = out;
               break;
            }
         }
      }
   }

   if(!early_depth_test)
      generate_depth(builder, key,
                     type, &mask,
                     z, depth_ptr);

   lp_build_mask_end(&mask);

   lp_build_flow_scope_end(flow);

   lp_build_flow_destroy(flow);

   *pmask = mask.value;
}
/**
 * Generate color blending and color output.
 */
static void
generate_blend(const struct pipe_blend_state *blend,
               LLVMBuilderRef builder,
               struct lp_type type,
               LLVMValueRef context_ptr,
               LLVMValueRef mask,
               LLVMValueRef *src,
               LLVMValueRef dst_ptr)
{
   struct lp_build_context bld;
   struct lp_build_flow_context *flow;
   struct lp_build_mask_context mask_ctx;
   LLVMTypeRef vec_type;
   LLVMTypeRef int_vec_type;
   LLVMValueRef const_ptr;
   LLVMValueRef con[4];
   LLVMValueRef dst[4];
   LLVMValueRef res[4];
   unsigned chan;

   lp_build_context_init(&bld, builder, type);

   flow = lp_build_flow_create(builder);

   /* we'll use this mask context to skip blending if all pixels are dead */
   lp_build_mask_begin(&mask_ctx, flow, type, mask);

   vec_type = lp_build_vec_type(type);
   int_vec_type = lp_build_int_vec_type(type);

   const_ptr = lp_jit_context_blend_color(builder, context_ptr);
   const_ptr = LLVMBuildBitCast(builder, const_ptr,
                                LLVMPointerType(vec_type, 0), "");

   for(chan = 0; chan < 4; ++chan) {
      LLVMValueRef index = LLVMConstInt(LLVMInt32Type(), chan, 0);
      con[chan] = LLVMBuildLoad(builder, LLVMBuildGEP(builder, const_ptr, &index, 1, ""), "");

      dst[chan] = LLVMBuildLoad(builder, LLVMBuildGEP(builder, dst_ptr, &index, 1, ""), "");

      lp_build_name(con[chan], "con.%c", "rgba"[chan]);
      lp_build_name(dst[chan], "dst.%c", "rgba"[chan]);
   }

   lp_build_blend_soa(builder, blend, type, src, dst, con, res);

   for(chan = 0; chan < 4; ++chan) {
      if(blend->rt[0].colormask & (1 << chan)) {
         LLVMValueRef index = LLVMConstInt(LLVMInt32Type(), chan, 0);
         lp_build_name(res[chan], "res.%c", "rgba"[chan]);
         res[chan] = lp_build_select(&bld, mask, res[chan], dst[chan]);
         LLVMBuildStore(builder, res[chan], LLVMBuildGEP(builder, dst_ptr, &index, 1, ""));
      }
   }

   lp_build_mask_end(&mask_ctx);
   lp_build_flow_destroy(flow);
}
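
/*
 * Per channel, the store above amounts to the following scalar sketch of the
 * generated code, where blend() stands for the equation emitted by
 * lp_build_blend_soa():
 *
 *    if (colormask & (1 << chan))
 *       dst[chan] = mask ? blend(src[chan], dst[chan], con[chan]) : dst[chan];
 */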
/**
 * Generate the runtime callable function for the whole fragment pipeline.
 * Note that the function which we generate operates on a block of 16
 * pixels at a time.  The block contains 2x2 quads.  Each quad contains
 * 2x2 pixels.
 */
static void
generate_fragment(struct llvmpipe_context *lp,
                  struct lp_fragment_shader *shader,
                  struct lp_fragment_shader_variant *variant,
                  unsigned do_tri_test)
{
   struct llvmpipe_screen *screen = llvmpipe_screen(lp->pipe.screen);
   const struct lp_fragment_shader_variant_key *key = &variant->key;
   struct lp_type fs_type;
   struct lp_type blend_type;
   LLVMTypeRef fs_elem_type;
   LLVMTypeRef fs_vec_type;
   LLVMTypeRef fs_int_vec_type;
   LLVMTypeRef blend_vec_type;
   LLVMTypeRef blend_int_vec_type;
   LLVMTypeRef arg_types[14];
   LLVMTypeRef func_type;
   LLVMTypeRef int32_vec4_type = lp_build_int32_vec4_type();
   LLVMValueRef context_ptr;
   LLVMValueRef x;
   LLVMValueRef y;
   LLVMValueRef a0_ptr;
   LLVMValueRef dadx_ptr;
   LLVMValueRef dady_ptr;
   LLVMValueRef color_ptr_ptr;
   LLVMValueRef depth_ptr;
   LLVMValueRef c0, c1, c2, step0_ptr, step1_ptr, step2_ptr;
   LLVMBasicBlockRef block;
   LLVMBuilderRef builder;
   LLVMValueRef x0;
   LLVMValueRef y0;
   struct lp_build_sampler_soa *sampler;
   struct lp_build_interp_soa_context interp;
   LLVMValueRef fs_mask[LP_MAX_VECTOR_LENGTH];
   LLVMValueRef fs_out_color[PIPE_MAX_COLOR_BUFS][NUM_CHANNELS][LP_MAX_VECTOR_LENGTH];
   LLVMValueRef blend_mask;
   LLVMValueRef blend_in_color[NUM_CHANNELS];
   LLVMValueRef function;
   unsigned num_fs;
   unsigned i;
   unsigned chan;
   unsigned cbuf;

   /* TODO: actually pick these based on the fs and color buffer
    * characteristics. */

   memset(&fs_type, 0, sizeof fs_type);
   fs_type.floating = TRUE;      /* floating point values */
   fs_type.sign = TRUE;          /* values are signed */
   fs_type.norm = FALSE;         /* values are not limited to [0,1] or [-1,1] */
   fs_type.width = 32;           /* 32-bit float */
   fs_type.length = 4;           /* 4 elements per vector */
   num_fs = 4;                   /* number of quads per block */

   memset(&blend_type, 0, sizeof blend_type);
   blend_type.floating = FALSE;  /* values are integers */
   blend_type.sign = FALSE;      /* values are unsigned */
   blend_type.norm = TRUE;       /* values are in [0,1] or [-1,1] */
   blend_type.width = 8;         /* 8-bit ubyte values */
   blend_type.length = 16;       /* 16 elements per vector */

   /*
    * Generate the function prototype. Any change here must be reflected in
    * lp_jit.h's lp_jit_frag_func function pointer type, and vice-versa.
    */

   fs_elem_type = lp_build_elem_type(fs_type);
   fs_vec_type = lp_build_vec_type(fs_type);
   fs_int_vec_type = lp_build_int_vec_type(fs_type);

   blend_vec_type = lp_build_vec_type(blend_type);
   blend_int_vec_type = lp_build_int_vec_type(blend_type);

   arg_types[0] = screen->context_ptr_type;            /* context */
   arg_types[1] = LLVMInt32Type();                     /* x */
   arg_types[2] = LLVMInt32Type();                     /* y */
   arg_types[3] = LLVMPointerType(fs_elem_type, 0);    /* a0 */
   arg_types[4] = LLVMPointerType(fs_elem_type, 0);    /* dadx */
   arg_types[5] = LLVMPointerType(fs_elem_type, 0);    /* dady */
   arg_types[6] = LLVMPointerType(LLVMPointerType(blend_vec_type, 0), 0);  /* color */
   arg_types[7] = LLVMPointerType(fs_int_vec_type, 0); /* depth */
   arg_types[8] = LLVMInt32Type();                     /* c0 */
   arg_types[9] = LLVMInt32Type();                     /* c1 */
   arg_types[10] = LLVMInt32Type();                    /* c2 */
   /* Note: the step arrays are built as int32[16] but we interpret
    * them here as int32_vec4[4].
    */
   arg_types[11] = LLVMPointerType(int32_vec4_type, 0);  /* step0 */
   arg_types[12] = LLVMPointerType(int32_vec4_type, 0);  /* step1 */
   arg_types[13] = LLVMPointerType(int32_vec4_type, 0);  /* step2 */
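
   /*
    * For reference, the matching C-side function pointer looks approximately
    * like this (a sketch; lp_jit.h holds the authoritative lp_jit_frag_func
    * definition):
    *
    *    typedef void (*lp_jit_frag_func)(const struct lp_jit_context *context,
    *                                     uint32_t x, uint32_t y,
    *                                     const void *a0,
    *                                     const void *dadx,
    *                                     const void *dady,
    *                                     uint8_t **color,
    *                                     void *depth,
    *                                     int32_t c0, int32_t c1, int32_t c2,
    *                                     const int32_t *step0,
    *                                     const int32_t *step1,
    *                                     const int32_t *step2);
    */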
   func_type = LLVMFunctionType(LLVMVoidType(), arg_types, Elements(arg_types), 0);

   function = LLVMAddFunction(screen->module, "shader", func_type);
   LLVMSetFunctionCallConv(function, LLVMCCallConv);

   variant->function[do_tri_test] = function;

   /* XXX: need to propagate noalias down into color param now we are
    * passing a pointer-to-pointer?
    */
   for(i = 0; i < Elements(arg_types); ++i)
      if(LLVMGetTypeKind(arg_types[i]) == LLVMPointerTypeKind)
         LLVMAddAttribute(LLVMGetParam(function, i), LLVMNoAliasAttribute);

   context_ptr   = LLVMGetParam(function, 0);
   x             = LLVMGetParam(function, 1);
   y             = LLVMGetParam(function, 2);
   a0_ptr        = LLVMGetParam(function, 3);
   dadx_ptr      = LLVMGetParam(function, 4);
   dady_ptr      = LLVMGetParam(function, 5);
   color_ptr_ptr = LLVMGetParam(function, 6);
   depth_ptr     = LLVMGetParam(function, 7);
   c0            = LLVMGetParam(function, 8);
   c1            = LLVMGetParam(function, 9);
   c2            = LLVMGetParam(function, 10);
   step0_ptr     = LLVMGetParam(function, 11);
   step1_ptr     = LLVMGetParam(function, 12);
   step2_ptr     = LLVMGetParam(function, 13);

   lp_build_name(context_ptr, "context");
   lp_build_name(x, "x");
   lp_build_name(y, "y");
   lp_build_name(a0_ptr, "a0");
   lp_build_name(dadx_ptr, "dadx");
   lp_build_name(dady_ptr, "dady");
   lp_build_name(color_ptr_ptr, "color_ptr");
   lp_build_name(depth_ptr, "depth");
   lp_build_name(c0, "c0");
   lp_build_name(c1, "c1");
   lp_build_name(c2, "c2");
   lp_build_name(step0_ptr, "step0");
   lp_build_name(step1_ptr, "step1");
   lp_build_name(step2_ptr, "step2");

   /*
    * Function body
    */

   block = LLVMAppendBasicBlock(function, "entry");
   builder = LLVMCreateBuilder();
   LLVMPositionBuilderAtEnd(builder, block);

   generate_pos0(builder, x, y, &x0, &y0);

   lp_build_interp_soa_init(&interp,
                            shader->base.tokens,
                            key->flatshade,
                            builder, fs_type,
                            a0_ptr, dadx_ptr, dady_ptr,
                            x0, y0);

   /* code generated texture sampling */
   sampler = lp_llvm_sampler_soa_create(key->sampler, context_ptr);

   /* loop over quads in the block */
   for(i = 0; i < num_fs; ++i) {
      LLVMValueRef index = LLVMConstInt(LLVMInt32Type(), i, 0);
      LLVMValueRef out_color[PIPE_MAX_COLOR_BUFS][NUM_CHANNELS];
      LLVMValueRef depth_ptr_i;

      lp_build_interp_soa_update(&interp, i);

      depth_ptr_i = LLVMBuildGEP(builder, depth_ptr, &index, 1, "");

      generate_fs(lp, shader, key,
                  builder,
                  fs_type,
                  context_ptr,
                  i,
                  &interp,
                  sampler,
                  &fs_mask[i], /* output */
                  out_color,
                  depth_ptr_i,
                  do_tri_test,
                  c0, c1, c2,
                  step0_ptr, step1_ptr, step2_ptr);

      for(cbuf = 0; cbuf < key->nr_cbufs; cbuf++)
         for(chan = 0; chan < NUM_CHANNELS; ++chan)
            fs_out_color[cbuf][chan][i] = out_color[cbuf][chan];
   }

   sampler->destroy(sampler);

   /* Loop over color outputs / color buffers to do blending.
    */
   for(cbuf = 0; cbuf < key->nr_cbufs; cbuf++) {
      LLVMValueRef color_ptr;
      LLVMValueRef index = LLVMConstInt(LLVMInt32Type(), cbuf, 0);

      /*
       * Convert the fs's output color and mask to fit to the blending type.
       */
      for(chan = 0; chan < NUM_CHANNELS; ++chan) {
         lp_build_conv(builder, fs_type, blend_type,
                       fs_out_color[cbuf][chan], num_fs,
                       &blend_in_color[chan], 1);
         lp_build_name(blend_in_color[chan], "color%d.%c", cbuf, "rgba"[chan]);
      }

      lp_build_conv_mask(builder, fs_type, blend_type,
                         fs_mask, num_fs,
                         &blend_mask, 1);

      color_ptr = LLVMBuildLoad(builder,
                                LLVMBuildGEP(builder, color_ptr_ptr, &index, 1, ""),
                                "");
      lp_build_name(color_ptr, "color_ptr%d", cbuf);

      /*
       * Blending.
       */
      generate_blend(&key->blend,
                     builder,
                     blend_type,
                     context_ptr,
                     blend_mask,
                     blend_in_color,
                     color_ptr);
   }

   LLVMBuildRetVoid(builder);

   LLVMDisposeBuilder(builder);

   /* Verify the LLVM IR.  If invalid, dump and abort */
   if(LLVMVerifyFunction(function, LLVMPrintMessageAction)) {
      LLVMDumpValue(function);
      abort();
   }

   /* Apply optimizations to LLVM IR */
   LLVMRunFunctionPassManager(screen->pass, function);

   if (LP_DEBUG & DEBUG_JIT) {
      /* Print the LLVM IR to stderr */
      LLVMDumpValue(function);
   }

   /*
    * Translate the LLVM IR into machine code.
    */
   variant->jit_function[do_tri_test] = (lp_jit_frag_func)LLVMGetPointerToGlobal(screen->engine, function);

   if (LP_DEBUG & DEBUG_ASM)
      lp_disassemble(variant->jit_function[do_tri_test]);
}
static struct lp_fragment_shader_variant *
generate_variant(struct llvmpipe_context *lp,
                 struct lp_fragment_shader *shader,
                 const struct lp_fragment_shader_variant_key *key)
{
   struct lp_fragment_shader_variant *variant;

   if (LP_DEBUG & DEBUG_JIT) {
      unsigned i;

      tgsi_dump(shader->base.tokens, 0);
      if(key->depth.enabled) {
         debug_printf("depth.format = %s\n", util_format_name(key->zsbuf_format));
         debug_printf("depth.func = %s\n", util_dump_func(key->depth.func, TRUE));
         debug_printf("depth.writemask = %u\n", key->depth.writemask);
      }
      if(key->alpha.enabled) {
         debug_printf("alpha.func = %s\n", util_dump_func(key->alpha.func, TRUE));
         debug_printf("alpha.ref_value = %f\n", key->alpha.ref_value);
      }
      if(key->blend.logicop_enable) {
         debug_printf("blend.logicop_func = %u\n", key->blend.logicop_func);
      }
      else if(key->blend.rt[0].blend_enable) {
         debug_printf("blend.rgb_func = %s\n",   util_dump_blend_func  (key->blend.rt[0].rgb_func, TRUE));
         debug_printf("rgb_src_factor = %s\n",   util_dump_blend_factor(key->blend.rt[0].rgb_src_factor, TRUE));
         debug_printf("rgb_dst_factor = %s\n",   util_dump_blend_factor(key->blend.rt[0].rgb_dst_factor, TRUE));
         debug_printf("alpha_func = %s\n",       util_dump_blend_func  (key->blend.rt[0].alpha_func, TRUE));
         debug_printf("alpha_src_factor = %s\n", util_dump_blend_factor(key->blend.rt[0].alpha_src_factor, TRUE));
         debug_printf("alpha_dst_factor = %s\n", util_dump_blend_factor(key->blend.rt[0].alpha_dst_factor, TRUE));
      }
      debug_printf("blend.colormask = 0x%x\n", key->blend.rt[0].colormask);
      for(i = 0; i < PIPE_MAX_SAMPLERS; ++i) {
         if(key->sampler[i].format) {
            debug_printf("sampler[%u] = \n", i);
            debug_printf("  .format = %s\n",
                         util_format_name(key->sampler[i].format));
            debug_printf("  .target = %s\n",
                         util_dump_tex_target(key->sampler[i].target, TRUE));
            debug_printf("  .pot = %u %u %u\n",
                         key->sampler[i].pot_width,
                         key->sampler[i].pot_height,
                         key->sampler[i].pot_depth);
            debug_printf("  .wrap = %s %s %s\n",
                         util_dump_tex_wrap(key->sampler[i].wrap_s, TRUE),
                         util_dump_tex_wrap(key->sampler[i].wrap_t, TRUE),
                         util_dump_tex_wrap(key->sampler[i].wrap_r, TRUE));
            debug_printf("  .min_img_filter = %s\n",
                         util_dump_tex_filter(key->sampler[i].min_img_filter, TRUE));
            debug_printf("  .min_mip_filter = %s\n",
                         util_dump_tex_mipfilter(key->sampler[i].min_mip_filter, TRUE));
            debug_printf("  .mag_img_filter = %s\n",
                         util_dump_tex_filter(key->sampler[i].mag_img_filter, TRUE));
            if(key->sampler[i].compare_mode != PIPE_TEX_COMPARE_NONE)
               debug_printf("  .compare_func = %s\n", util_dump_func(key->sampler[i].compare_func, TRUE));
            debug_printf("  .normalized_coords = %u\n", key->sampler[i].normalized_coords);
         }
      }
   }

   variant = CALLOC_STRUCT(lp_fragment_shader_variant);
   if(!variant)
      return NULL;

   variant->shader = shader;
   memcpy(&variant->key, key, sizeof *key);

   generate_fragment(lp, shader, variant, 0);
   generate_fragment(lp, shader, variant, 1);

   /* insert new variant into linked list */
   variant->next = shader->variants;
   shader->variants = variant;

   return variant;
}
static void *
llvmpipe_create_fs_state(struct pipe_context *pipe,
                         const struct pipe_shader_state *templ)
{
   struct lp_fragment_shader *shader;

   shader = CALLOC_STRUCT(lp_fragment_shader);
   if (!shader)
      return NULL;

   /* get/save the summary info for this shader */
   tgsi_scan_shader(templ->tokens, &shader->info);

   /* we need to keep a local copy of the tokens */
   shader->base.tokens = tgsi_dup_tokens(templ->tokens);

   return shader;
}
static void
llvmpipe_bind_fs_state(struct pipe_context *pipe, void *fs)
{
   struct llvmpipe_context *llvmpipe = llvmpipe_context(pipe);

   if (llvmpipe->fs == fs)
      return;

   draw_flush(llvmpipe->draw);

   llvmpipe->fs = fs;

   llvmpipe->dirty |= LP_NEW_FS;
}
static void
llvmpipe_delete_fs_state(struct pipe_context *pipe, void *fs)
{
   struct llvmpipe_context *llvmpipe = llvmpipe_context(pipe);
   struct llvmpipe_screen *screen = llvmpipe_screen(pipe->screen);
   struct lp_fragment_shader *shader = fs;
   struct lp_fragment_shader_variant *variant;

   assert(fs != llvmpipe->fs);

   /*
    * XXX: we need to flush the context until we have some sort of reference
    * counting in fragment shaders as they may still be binned
    */
   draw_flush(llvmpipe->draw);
   lp_setup_flush(llvmpipe->setup, 0);

   variant = shader->variants;
   while (variant) {
      struct lp_fragment_shader_variant *next = variant->next;
      unsigned i;

      for (i = 0; i < Elements(variant->function); i++) {
         if (variant->function[i]) {
            if (variant->jit_function[i])
               LLVMFreeMachineCodeForFunction(screen->engine,
                                              variant->function[i]);
            LLVMDeleteFunction(variant->function[i]);
         }
      }

      FREE(variant);

      variant = next;
   }

   FREE((void *) shader->base.tokens);
   FREE(shader);
}
static void
llvmpipe_set_constant_buffer(struct pipe_context *pipe,
                             uint shader, uint index,
                             struct pipe_buffer *constants)
{
   struct llvmpipe_context *llvmpipe = llvmpipe_context(pipe);
   unsigned size = constants ? constants->size : 0;
   const void *data = constants ? llvmpipe_buffer(constants)->data : NULL;

   assert(shader < PIPE_SHADER_TYPES);
   assert(index == 0);

   if(llvmpipe->constants[shader] == constants)
      return;

   draw_flush(llvmpipe->draw);

   /* note: reference counting */
   pipe_buffer_reference(&llvmpipe->constants[shader], constants);

   if(shader == PIPE_SHADER_VERTEX) {
      draw_set_mapped_constant_buffer(llvmpipe->draw, PIPE_SHADER_VERTEX, 0,
                                      data, size);
   }

   llvmpipe->dirty |= LP_NEW_CONSTANTS;
}
/**
 * We need to generate several variants of the fragment pipeline to match
 * all the combinations of the contributing state atoms.
 *
 * TODO: there is actually no reason to tie this to context state -- the
 * generated code could be cached globally in the screen.
 */
static void
make_variant_key(struct llvmpipe_context *lp,
                 struct lp_fragment_shader *shader,
                 struct lp_fragment_shader_variant_key *key)
{
   unsigned i;

   memset(key, 0, sizeof *key);

   if(lp->framebuffer.zsbuf &&
      lp->depth_stencil->depth.enabled) {
      key->zsbuf_format = lp->framebuffer.zsbuf->format;
      memcpy(&key->depth, &lp->depth_stencil->depth, sizeof key->depth);
   }

   key->alpha.enabled = lp->depth_stencil->alpha.enabled;
   if(key->alpha.enabled)
      key->alpha.func = lp->depth_stencil->alpha.func;
   /* alpha.ref_value is passed in jit_context */

   key->flatshade = lp->rasterizer->flatshade;
   key->scissor = lp->rasterizer->scissor;

   if (lp->framebuffer.nr_cbufs) {
      memcpy(&key->blend, lp->blend, sizeof key->blend);
   }

   key->nr_cbufs = lp->framebuffer.nr_cbufs;
   for (i = 0; i < lp->framebuffer.nr_cbufs; i++) {
      const struct util_format_description *format_desc;
      unsigned chan;

      format_desc = util_format_description(lp->framebuffer.cbufs[i]->format);
      assert(format_desc->colorspace == UTIL_FORMAT_COLORSPACE_RGB ||
             format_desc->colorspace == UTIL_FORMAT_COLORSPACE_SRGB);

      /* mask out color channels not present in the color buffer.
       * Should be simple to incorporate per-cbuf writemasks:
       */
      for(chan = 0; chan < 4; ++chan) {
         enum util_format_swizzle swizzle = format_desc->swizzle[chan];

         if(swizzle <= UTIL_FORMAT_SWIZZLE_W)
            key->blend.rt[0].colormask |= (1 << chan);
      }
   }

   for(i = 0; i < PIPE_MAX_SAMPLERS; ++i)
      if(shader->info.file_mask[TGSI_FILE_SAMPLER] & (1 << i))
         lp_sampler_static_state(&key->sampler[i],
                                 lp->fragment_sampler_views[i]->texture,
                                 lp->sampler[i]);
}
/**
 * Update fragment state.  This is called just prior to drawing
 * something when some fragment-related state has changed.
 */
void
llvmpipe_update_fs(struct llvmpipe_context *lp)
{
   struct lp_fragment_shader *shader = lp->fs;
   struct lp_fragment_shader_variant_key key;
   struct lp_fragment_shader_variant *variant;
   boolean opaque;

   make_variant_key(lp, shader, &key);

   variant = shader->variants;
   while (variant) {
      if(memcmp(&variant->key, &key, sizeof key) == 0)
         break;

      variant = variant->next;
   }

   if (!variant) {
      int64_t t0, t1, dt;

      t0 = os_time_get();

      variant = generate_variant(lp, shader, &key);

      t1 = os_time_get();
      dt = t1 - t0;
      LP_COUNT_ADD(llvm_compile_time, dt);
      LP_COUNT_ADD(nr_llvm_compiles, 2);  /* emit vs. omit in/out test */
   }

   shader->current = variant;

   /* TODO: put this in the variant */
   /* TODO: most of these can be relaxed, in particular the colormask */
   opaque = !key.blend.logicop_enable &&
            !key.blend.rt[0].blend_enable &&
            key.blend.rt[0].colormask == 0xf &&
            !key.alpha.enabled &&
            !key.depth.enabled &&
            !key.scissor &&
            !shader->info.uses_kill
            ? TRUE : FALSE;

   lp_setup_set_fs_functions(lp->setup,
                             shader->current->jit_function[0],
                             shader->current->jit_function[1],