1 /**************************************************************************
3 * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
5 * Copyright 2009 VMware, Inc. All rights reserved.
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the
9 * "Software"), to deal in the Software without restriction, including
10 * without limitation the rights to use, copy, modify, merge, publish,
11 * distribute, sub license, and/or sell copies of the Software, and to
12 * permit persons to whom the Software is furnished to do so, subject to
13 * the following conditions:
15 * The above copyright notice and this permission notice (including the
16 * next paragraph) shall be included in all copies or substantial portions
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
20 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
22 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
23 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
24 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
27 **************************************************************************/
32 * Generate SPU fragment program/shader code.
34 * Note that we generate SOA-style code here. So each TGSI instruction
35 * operates on four pixels (and is translated into four SPU instructions,
36 * generally speaking).
42 #include "pipe/p_defines.h"
43 #include "pipe/p_state.h"
44 #include "pipe/p_shader_tokens.h"
45 #include "tgsi/tgsi_parse.h"
46 #include "tgsi/tgsi_util.h"
47 #include "tgsi/tgsi_exec.h"
48 #include "tgsi/tgsi_dump.h"
49 #include "rtasm/rtasm_ppc_spe.h"
50 #include "util/u_memory.h"
51 #include "cell_context.h"
52 #include "cell_gen_fp.h"
64 * Context needed during code generation.
68 struct cell_context
*cell
;
69 int inputs_reg
; /**< 1st function parameter */
70 int outputs_reg
; /**< 2nd function parameter */
71 int constants_reg
; /**< 3rd function parameter */
72 int temp_regs
[MAX_TEMPS
][4]; /**< maps TGSI temps to SPE registers */
73 int imm_regs
[MAX_IMMED
][4]; /**< maps TGSI immediates to SPE registers */
75 int num_imm
; /**< number of immediates */
77 int one_reg
; /**< register containing {1.0, 1.0, 1.0, 1.0} */
79 int addr_reg
; /**< address register, integer values */
81 /** Per-instruction temps / intermediate temps */
85 /** Current IF/ELSE/ENDIF nesting level */
87 /** Current BGNLOOP/ENDLOOP nesting level */
89 /** Location of start of current loop */
92 /** Index of if/conditional mask register */
94 /** Index of loop mask register */
97 /** Index of master execution mask register */
100 /** KIL mask: indicates which fragments have been killed */
103 int frame_size
; /**< Stack frame size, in words */
105 struct spe_function
*f
;
111 * Allocate an intermediate temporary register.
114 get_itemp(struct codegen
*gen
)
116 int t
= spe_allocate_available_register(gen
->f
);
117 assert(gen
->num_itemps
< Elements(gen
->itemps
));
118 gen
->itemps
[gen
->num_itemps
++] = t
;
123 * Free all intermediate temporary registers. To be called after each
124 * instruction has been emitted.
127 free_itemps(struct codegen
*gen
)
130 for (i
= 0; i
< gen
->num_itemps
; i
++) {
131 spe_release_register(gen
->f
, gen
->itemps
[i
]);
138 * Return index of an SPE register containing {1.0, 1.0, 1.0, 1.0}.
139 * The register is allocated and initialized upon the first call.
142 get_const_one_reg(struct codegen
*gen
)
144 if (gen
->one_reg
<= 0) {
145 gen
->one_reg
= spe_allocate_available_register(gen
->f
);
147 spe_indent(gen
->f
, 4);
148 spe_comment(gen
->f
, -4, "init constant reg = 1.0:");
150 /* one = {1.0, 1.0, 1.0, 1.0} */
151 spe_load_float(gen
->f
, gen
->one_reg
, 1.0f
);
153 spe_indent(gen
->f
, -4);
161 * Return index of the address register.
162 * Used for indirect register loads/stores.
165 get_address_reg(struct codegen
*gen
)
167 if (gen
->addr_reg
<= 0) {
168 gen
->addr_reg
= spe_allocate_available_register(gen
->f
);
170 spe_indent(gen
->f
, 4);
171 spe_comment(gen
->f
, -4, "init address reg = 0:");
173 /* init addr = {0, 0, 0, 0} */
174 spe_zero(gen
->f
, gen
->addr_reg
);
176 spe_indent(gen
->f
, -4);
179 return gen
->addr_reg
;
184 * Return index of the master execution mask.
185 * The register is allocated an initialized upon the first call.
187 * The master execution mask controls which pixels in a quad are
188 * modified, according to surrounding conditionals, loops, etc.
191 get_exec_mask_reg(struct codegen
*gen
)
193 if (gen
->exec_mask_reg
<= 0) {
194 gen
->exec_mask_reg
= spe_allocate_available_register(gen
->f
);
196 /* XXX this may not be needed */
197 spe_comment(gen
->f
, 0*-4, "initialize master execution mask = ~0");
198 spe_load_int(gen
->f
, gen
->exec_mask_reg
, ~0);
201 return gen
->exec_mask_reg
;
205 /** Return index of the conditional (if/else) execution mask register */
207 get_cond_mask_reg(struct codegen
*gen
)
209 if (gen
->cond_mask_reg
<= 0) {
210 gen
->cond_mask_reg
= spe_allocate_available_register(gen
->f
);
213 return gen
->cond_mask_reg
;
217 /** Return index of the loop execution mask register */
219 get_loop_mask_reg(struct codegen
*gen
)
221 if (gen
->loop_mask_reg
<= 0) {
222 gen
->loop_mask_reg
= spe_allocate_available_register(gen
->f
);
225 return gen
->loop_mask_reg
;
231 is_register_src(struct codegen
*gen
, int channel
,
232 const struct tgsi_full_src_register
*src
)
234 int swizzle
= tgsi_util_get_full_src_register_swizzle(src
, channel
);
235 int sign_op
= tgsi_util_get_full_src_register_sign_mode(src
, channel
);
237 if (swizzle
> TGSI_SWIZZLE_W
|| sign_op
!= TGSI_UTIL_SIGN_KEEP
) {
240 if (src
->Register
.File
== TGSI_FILE_TEMPORARY
||
241 src
->Register
.File
== TGSI_FILE_IMMEDIATE
) {
249 is_memory_dst(struct codegen
*gen
, int channel
,
250 const struct tgsi_full_dst_register
*dst
)
252 if (dst
->Register
.File
== TGSI_FILE_OUTPUT
) {
262 * Return the index of the SPU temporary containing the named TGSI
263 * source register. If the TGSI register is a TGSI_FILE_TEMPORARY we
264 * just return the corresponding SPE register. If the TGIS register
265 * is TGSI_FILE_INPUT/CONSTANT/IMMEDIATE we allocate a new SPE register
266 * and emit an SPE load instruction.
269 get_src_reg(struct codegen
*gen
,
271 const struct tgsi_full_src_register
*src
)
274 int swizzle
= tgsi_util_get_full_src_register_swizzle(src
, channel
);
275 boolean reg_is_itemp
= FALSE
;
278 assert(swizzle
>= TGSI_SWIZZLE_X
);
279 assert(swizzle
<= TGSI_SWIZZLE_W
);
282 int index
= src
->Register
.Index
;
286 if (src
->Register
.Indirect
) {
290 switch (src
->Register
.File
) {
291 case TGSI_FILE_TEMPORARY
:
292 reg
= gen
->temp_regs
[index
][swizzle
];
294 case TGSI_FILE_INPUT
:
296 /* offset is measured in quadwords, not bytes */
297 int offset
= index
* 4 + swizzle
;
298 reg
= get_itemp(gen
);
300 /* Load: reg = memory[(machine_reg) + offset] */
301 spe_lqd(gen
->f
, reg
, gen
->inputs_reg
, offset
* 16);
304 case TGSI_FILE_IMMEDIATE
:
305 reg
= gen
->imm_regs
[index
][swizzle
];
307 case TGSI_FILE_CONSTANT
:
309 /* offset is measured in quadwords, not bytes */
310 int offset
= index
* 4 + swizzle
;
311 reg
= get_itemp(gen
);
313 /* Load: reg = memory[(machine_reg) + offset] */
314 spe_lqd(gen
->f
, reg
, gen
->constants_reg
, offset
* 16);
323 * Handle absolute value, negate or set-negative of src register.
325 sign_op
= tgsi_util_get_full_src_register_sign_mode(src
, channel
);
326 if (sign_op
!= TGSI_UTIL_SIGN_KEEP
) {
328 * All sign ops are done by manipulating bit 31, the IEEE float sign bit.
330 const int bit31mask_reg
= get_itemp(gen
);
334 /* re-use 'reg' for the result */
338 /* alloc a new reg for the result */
339 result_reg
= get_itemp(gen
);
342 /* mask with bit 31 set, the rest cleared */
343 spe_load_uint(gen
->f
, bit31mask_reg
, (1 << 31));
345 if (sign_op
== TGSI_UTIL_SIGN_CLEAR
) {
346 spe_andc(gen
->f
, result_reg
, reg
, bit31mask_reg
);
348 else if (sign_op
== TGSI_UTIL_SIGN_SET
) {
349 spe_and(gen
->f
, result_reg
, reg
, bit31mask_reg
);
352 assert(sign_op
== TGSI_UTIL_SIGN_TOGGLE
);
353 spe_xor(gen
->f
, result_reg
, reg
, bit31mask_reg
);
364 * Return the index of an SPE register to use for the given TGSI register.
365 * If the TGSI register is TGSI_FILE_TEMPORARAY, the index of the
366 * corresponding SPE register is returned. If the TGSI register is
367 * TGSI_FILE_OUTPUT we allocate an intermediate temporary register.
368 * See store_dest_reg() below...
371 get_dst_reg(struct codegen
*gen
,
373 const struct tgsi_full_dst_register
*dest
)
377 switch (dest
->Register
.File
) {
378 case TGSI_FILE_TEMPORARY
:
379 if (gen
->if_nesting
> 0 || gen
->loop_nesting
> 0)
380 reg
= get_itemp(gen
);
382 reg
= gen
->temp_regs
[dest
->Register
.Index
][channel
];
384 case TGSI_FILE_OUTPUT
:
385 reg
= get_itemp(gen
);
396 * When a TGSI instruction is writing to an output register, this
397 * function emits the SPE store instruction to store the value_reg.
398 * \param value_reg the SPE register containing the value to store.
399 * This would have been returned by get_dst_reg().
402 store_dest_reg(struct codegen
*gen
,
403 int value_reg
, int channel
,
404 const struct tgsi_full_dst_register
*dest
)
407 * XXX need to implement dst reg clamping/saturation
410 switch (inst
->Instruction
.Saturate
) {
413 case TGSI_SAT_ZERO_ONE
:
415 case TGSI_SAT_MINUS_PLUS_ONE
:
422 switch (dest
->Register
.File
) {
423 case TGSI_FILE_TEMPORARY
:
424 if (gen
->if_nesting
> 0 || gen
->loop_nesting
> 0) {
425 int d_reg
= gen
->temp_regs
[dest
->Register
.Index
][channel
];
426 int exec_reg
= get_exec_mask_reg(gen
);
427 /* Mix d with new value according to exec mask:
428 * d[i] = mask_reg[i] ? value_reg : d_reg
430 spe_selb(gen
->f
, d_reg
, d_reg
, value_reg
, exec_reg
);
433 /* we're not inside a condition or loop: do nothing special */
437 case TGSI_FILE_OUTPUT
:
439 /* offset is measured in quadwords, not bytes */
440 int offset
= dest
->Register
.Index
* 4 + channel
;
441 if (gen
->if_nesting
> 0 || gen
->loop_nesting
> 0) {
442 int exec_reg
= get_exec_mask_reg(gen
);
443 int curval_reg
= get_itemp(gen
);
444 /* First read the current value from memory:
445 * Load: curval = memory[(machine_reg) + offset]
447 spe_lqd(gen
->f
, curval_reg
, gen
->outputs_reg
, offset
* 16);
448 /* Mix curval with newvalue according to exec mask:
449 * d[i] = mask_reg[i] ? value_reg : d_reg
451 spe_selb(gen
->f
, curval_reg
, curval_reg
, value_reg
, exec_reg
);
452 /* Store: memory[(machine_reg) + offset] = curval */
453 spe_stqd(gen
->f
, curval_reg
, gen
->outputs_reg
, offset
* 16);
456 /* Store: memory[(machine_reg) + offset] = reg */
457 spe_stqd(gen
->f
, value_reg
, gen
->outputs_reg
, offset
* 16);
469 emit_prologue(struct codegen
*gen
)
471 gen
->frame_size
= 1024; /* XXX temporary, should be dynamic */
473 spe_comment(gen
->f
, 0, "Function prologue:");
475 /* save $lr on stack # stqd $lr,16($sp) */
476 spe_stqd(gen
->f
, SPE_REG_RA
, SPE_REG_SP
, 16);
478 if (gen
->frame_size
>= 512) {
479 /* offset is too large for ai instruction */
480 int offset_reg
= spe_allocate_available_register(gen
->f
);
481 int sp_reg
= spe_allocate_available_register(gen
->f
);
482 /* offset = -framesize */
483 spe_load_int(gen
->f
, offset_reg
, -gen
->frame_size
);
485 spe_move(gen
->f
, sp_reg
, SPE_REG_SP
);
486 /* $sp = $sp + offset_reg */
487 spe_a(gen
->f
, SPE_REG_SP
, SPE_REG_SP
, offset_reg
);
488 /* save $sp in stack frame */
489 spe_stqd(gen
->f
, sp_reg
, SPE_REG_SP
, 0);
491 spe_release_register(gen
->f
, offset_reg
);
492 spe_release_register(gen
->f
, sp_reg
);
495 /* save stack pointer # stqd $sp,-frameSize($sp) */
496 spe_stqd(gen
->f
, SPE_REG_SP
, SPE_REG_SP
, -gen
->frame_size
);
498 /* adjust stack pointer # ai $sp,$sp,-frameSize */
499 spe_ai(gen
->f
, SPE_REG_SP
, SPE_REG_SP
, -gen
->frame_size
);
505 emit_epilogue(struct codegen
*gen
)
507 const int return_reg
= 3;
509 spe_comment(gen
->f
, 0, "Function epilogue:");
511 spe_comment(gen
->f
, 0, "return the killed mask");
512 if (gen
->kill_mask_reg
> 0) {
513 /* shader called KIL, return the "alive" mask */
514 spe_move(gen
->f
, return_reg
, gen
->kill_mask_reg
);
517 /* return {0,0,0,0} */
518 spe_load_uint(gen
->f
, return_reg
, 0);
521 spe_comment(gen
->f
, 0, "restore stack and return");
522 if (gen
->frame_size
>= 512) {
523 /* offset is too large for ai instruction */
524 int offset_reg
= spe_allocate_available_register(gen
->f
);
525 /* offset = framesize */
526 spe_load_int(gen
->f
, offset_reg
, gen
->frame_size
);
527 /* $sp = $sp + offset */
528 spe_a(gen
->f
, SPE_REG_SP
, SPE_REG_SP
, offset_reg
);
530 spe_release_register(gen
->f
, offset_reg
);
533 /* restore stack pointer # ai $sp,$sp,frameSize */
534 spe_ai(gen
->f
, SPE_REG_SP
, SPE_REG_SP
, gen
->frame_size
);
537 /* restore $lr # lqd $lr,16($sp) */
538 spe_lqd(gen
->f
, SPE_REG_RA
, SPE_REG_SP
, 16);
540 /* return from function call */
541 spe_bi(gen
->f
, SPE_REG_RA
, 0, 0);
/** Loop over each destination channel (X/Y/Z/W) enabled in the
 *  instruction's write mask.
 */
#define FOR_EACH_ENABLED_CHANNEL(inst, ch) \
   for (ch = 0; ch < 4; ch++) \
      if (inst->Dst[0].Register.WriteMask & (1 << ch))
551 emit_ARL(struct codegen
*gen
, const struct tgsi_full_instruction
*inst
)
553 int ch
= 0, src_reg
, addr_reg
;
555 src_reg
= get_src_reg(gen
, ch
, &inst
->Src
[0]);
556 addr_reg
= get_address_reg(gen
);
558 /* convert float to int */
559 spe_cflts(gen
->f
, addr_reg
, src_reg
, 0);
568 emit_MOV(struct codegen
*gen
, const struct tgsi_full_instruction
*inst
)
570 int ch
, src_reg
[4], dst_reg
[4];
572 FOR_EACH_ENABLED_CHANNEL(inst
, ch
) {
573 src_reg
[ch
] = get_src_reg(gen
, ch
, &inst
->Src
[0]);
574 dst_reg
[ch
] = get_dst_reg(gen
, ch
, &inst
->Dst
[0]);
577 FOR_EACH_ENABLED_CHANNEL(inst
, ch
) {
578 if (is_register_src(gen
, ch
, &inst
->Src
[0]) &&
579 is_memory_dst(gen
, ch
, &inst
->Dst
[0])) {
580 /* special-case: register to memory store */
581 store_dest_reg(gen
, src_reg
[ch
], ch
, &inst
->Dst
[0]);
584 spe_move(gen
->f
, dst_reg
[ch
], src_reg
[ch
]);
585 store_dest_reg(gen
, dst_reg
[ch
], ch
, &inst
->Dst
[0]);
595 * Emit binary operation
598 emit_binop(struct codegen
*gen
, const struct tgsi_full_instruction
*inst
)
600 int ch
, s1_reg
[4], s2_reg
[4], d_reg
[4];
602 /* Loop over Red/Green/Blue/Alpha channels, fetch src operands */
603 FOR_EACH_ENABLED_CHANNEL(inst
, ch
) {
604 s1_reg
[ch
] = get_src_reg(gen
, ch
, &inst
->Src
[0]);
605 s2_reg
[ch
] = get_src_reg(gen
, ch
, &inst
->Src
[1]);
606 d_reg
[ch
] = get_dst_reg(gen
, ch
, &inst
->Dst
[0]);
609 /* Loop over Red/Green/Blue/Alpha channels, do the op, store results */
610 FOR_EACH_ENABLED_CHANNEL(inst
, ch
) {
611 /* Emit actual SPE instruction: d = s1 + s2 */
612 switch (inst
->Instruction
.Opcode
) {
613 case TGSI_OPCODE_ADD
:
614 spe_fa(gen
->f
, d_reg
[ch
], s1_reg
[ch
], s2_reg
[ch
]);
616 case TGSI_OPCODE_SUB
:
617 spe_fs(gen
->f
, d_reg
[ch
], s1_reg
[ch
], s2_reg
[ch
]);
619 case TGSI_OPCODE_MUL
:
620 spe_fm(gen
->f
, d_reg
[ch
], s1_reg
[ch
], s2_reg
[ch
]);
627 /* Store the result (a no-op for TGSI_FILE_TEMPORARY dests) */
628 FOR_EACH_ENABLED_CHANNEL(inst
, ch
) {
629 store_dest_reg(gen
, d_reg
[ch
], ch
, &inst
->Dst
[0]);
632 /* Free any intermediate temps we allocated */
640 * Emit multiply add. See emit_ADD for comments.
643 emit_MAD(struct codegen
*gen
, const struct tgsi_full_instruction
*inst
)
645 int ch
, s1_reg
[4], s2_reg
[4], s3_reg
[4], d_reg
[4];
647 FOR_EACH_ENABLED_CHANNEL(inst
, ch
) {
648 s1_reg
[ch
] = get_src_reg(gen
, ch
, &inst
->Src
[0]);
649 s2_reg
[ch
] = get_src_reg(gen
, ch
, &inst
->Src
[1]);
650 s3_reg
[ch
] = get_src_reg(gen
, ch
, &inst
->Src
[2]);
651 d_reg
[ch
] = get_dst_reg(gen
, ch
, &inst
->Dst
[0]);
653 FOR_EACH_ENABLED_CHANNEL(inst
, ch
) {
654 spe_fma(gen
->f
, d_reg
[ch
], s1_reg
[ch
], s2_reg
[ch
], s3_reg
[ch
]);
656 FOR_EACH_ENABLED_CHANNEL(inst
, ch
) {
657 store_dest_reg(gen
, d_reg
[ch
], ch
, &inst
->Dst
[0]);
665 * Emit linear interpolate. See emit_ADD for comments.
668 emit_LRP(struct codegen
*gen
, const struct tgsi_full_instruction
*inst
)
670 int ch
, s1_reg
[4], s2_reg
[4], s3_reg
[4], d_reg
[4], tmp_reg
[4];
672 /* setup/get src/dst/temp regs */
673 FOR_EACH_ENABLED_CHANNEL(inst
, ch
) {
674 s1_reg
[ch
] = get_src_reg(gen
, ch
, &inst
->Src
[0]);
675 s2_reg
[ch
] = get_src_reg(gen
, ch
, &inst
->Src
[1]);
676 s3_reg
[ch
] = get_src_reg(gen
, ch
, &inst
->Src
[2]);
677 d_reg
[ch
] = get_dst_reg(gen
, ch
, &inst
->Dst
[0]);
678 tmp_reg
[ch
] = get_itemp(gen
);
681 /* d = s3 + s1(s2 - s3) */
682 /* do all subtracts, then all fma, then all stores to better pipeline */
683 FOR_EACH_ENABLED_CHANNEL(inst
, ch
) {
684 spe_fs(gen
->f
, tmp_reg
[ch
], s2_reg
[ch
], s3_reg
[ch
]);
686 FOR_EACH_ENABLED_CHANNEL(inst
, ch
) {
687 spe_fma(gen
->f
, d_reg
[ch
], tmp_reg
[ch
], s1_reg
[ch
], s3_reg
[ch
]);
689 FOR_EACH_ENABLED_CHANNEL(inst
, ch
) {
690 store_dest_reg(gen
, d_reg
[ch
], ch
, &inst
->Dst
[0]);
699 * Emit reciprocal or recip sqrt.
702 emit_RCP_RSQ(struct codegen
*gen
, const struct tgsi_full_instruction
*inst
)
704 int ch
, s1_reg
[4], d_reg
[4], tmp_reg
[4];
706 FOR_EACH_ENABLED_CHANNEL(inst
, ch
) {
707 s1_reg
[ch
] = get_src_reg(gen
, ch
, &inst
->Src
[0]);
708 d_reg
[ch
] = get_dst_reg(gen
, ch
, &inst
->Dst
[0]);
709 tmp_reg
[ch
] = get_itemp(gen
);
712 FOR_EACH_ENABLED_CHANNEL(inst
, ch
) {
713 if (inst
->Instruction
.Opcode
== TGSI_OPCODE_RCP
) {
715 spe_frest(gen
->f
, tmp_reg
[ch
], s1_reg
[ch
]);
718 /* tmp = 1/sqrt(s1) */
719 spe_frsqest(gen
->f
, tmp_reg
[ch
], s1_reg
[ch
]);
723 FOR_EACH_ENABLED_CHANNEL(inst
, ch
) {
724 /* d = float_interp(s1, tmp) */
725 spe_fi(gen
->f
, d_reg
[ch
], s1_reg
[ch
], tmp_reg
[ch
]);
728 FOR_EACH_ENABLED_CHANNEL(inst
, ch
) {
729 store_dest_reg(gen
, d_reg
[ch
], ch
, &inst
->Dst
[0]);
738 * Emit absolute value. See emit_ADD for comments.
741 emit_ABS(struct codegen
*gen
, const struct tgsi_full_instruction
*inst
)
743 int ch
, s1_reg
[4], d_reg
[4];
744 const int bit31mask_reg
= get_itemp(gen
);
746 /* mask with bit 31 set, the rest cleared */
747 spe_load_uint(gen
->f
, bit31mask_reg
, (1 << 31));
749 FOR_EACH_ENABLED_CHANNEL(inst
, ch
) {
750 s1_reg
[ch
] = get_src_reg(gen
, ch
, &inst
->Src
[0]);
751 d_reg
[ch
] = get_dst_reg(gen
, ch
, &inst
->Dst
[0]);
754 /* d = sign bit cleared in s1 */
755 FOR_EACH_ENABLED_CHANNEL(inst
, ch
) {
756 spe_andc(gen
->f
, d_reg
[ch
], s1_reg
[ch
], bit31mask_reg
);
759 FOR_EACH_ENABLED_CHANNEL(inst
, ch
) {
760 store_dest_reg(gen
, d_reg
[ch
], ch
, &inst
->Dst
[0]);
768 * Emit 3 component dot product. See emit_ADD for comments.
771 emit_DP3(struct codegen
*gen
, const struct tgsi_full_instruction
*inst
)
774 int s1x_reg
, s1y_reg
, s1z_reg
;
775 int s2x_reg
, s2y_reg
, s2z_reg
;
776 int t0_reg
= get_itemp(gen
), t1_reg
= get_itemp(gen
);
778 s1x_reg
= get_src_reg(gen
, CHAN_X
, &inst
->Src
[0]);
779 s2x_reg
= get_src_reg(gen
, CHAN_X
, &inst
->Src
[1]);
780 s1y_reg
= get_src_reg(gen
, CHAN_Y
, &inst
->Src
[0]);
781 s2y_reg
= get_src_reg(gen
, CHAN_Y
, &inst
->Src
[1]);
782 s1z_reg
= get_src_reg(gen
, CHAN_Z
, &inst
->Src
[0]);
783 s2z_reg
= get_src_reg(gen
, CHAN_Z
, &inst
->Src
[1]);
786 spe_fm(gen
->f
, t0_reg
, s1x_reg
, s2x_reg
);
789 spe_fm(gen
->f
, t1_reg
, s1y_reg
, s2y_reg
);
791 /* t0 = z0 * z1 + t0 */
792 spe_fma(gen
->f
, t0_reg
, s1z_reg
, s2z_reg
, t0_reg
);
795 spe_fa(gen
->f
, t0_reg
, t0_reg
, t1_reg
);
797 FOR_EACH_ENABLED_CHANNEL(inst
, ch
) {
798 int d_reg
= get_dst_reg(gen
, ch
, &inst
->Dst
[0]);
799 spe_move(gen
->f
, d_reg
, t0_reg
);
800 store_dest_reg(gen
, d_reg
, ch
, &inst
->Dst
[0]);
808 * Emit 4 component dot product. See emit_ADD for comments.
811 emit_DP4(struct codegen
*gen
, const struct tgsi_full_instruction
*inst
)
814 int s0x_reg
, s0y_reg
, s0z_reg
, s0w_reg
;
815 int s1x_reg
, s1y_reg
, s1z_reg
, s1w_reg
;
816 int t0_reg
= get_itemp(gen
), t1_reg
= get_itemp(gen
);
818 s0x_reg
= get_src_reg(gen
, CHAN_X
, &inst
->Src
[0]);
819 s1x_reg
= get_src_reg(gen
, CHAN_X
, &inst
->Src
[1]);
820 s0y_reg
= get_src_reg(gen
, CHAN_Y
, &inst
->Src
[0]);
821 s1y_reg
= get_src_reg(gen
, CHAN_Y
, &inst
->Src
[1]);
822 s0z_reg
= get_src_reg(gen
, CHAN_Z
, &inst
->Src
[0]);
823 s1z_reg
= get_src_reg(gen
, CHAN_Z
, &inst
->Src
[1]);
824 s0w_reg
= get_src_reg(gen
, CHAN_W
, &inst
->Src
[0]);
825 s1w_reg
= get_src_reg(gen
, CHAN_W
, &inst
->Src
[1]);
828 spe_fm(gen
->f
, t0_reg
, s0x_reg
, s1x_reg
);
831 spe_fm(gen
->f
, t1_reg
, s0y_reg
, s1y_reg
);
833 /* t0 = z0 * z1 + t0 */
834 spe_fma(gen
->f
, t0_reg
, s0z_reg
, s1z_reg
, t0_reg
);
836 /* t1 = w0 * w1 + t1 */
837 spe_fma(gen
->f
, t1_reg
, s0w_reg
, s1w_reg
, t1_reg
);
840 spe_fa(gen
->f
, t0_reg
, t0_reg
, t1_reg
);
842 FOR_EACH_ENABLED_CHANNEL(inst
, ch
) {
843 int d_reg
= get_dst_reg(gen
, ch
, &inst
->Dst
[0]);
844 spe_move(gen
->f
, d_reg
, t0_reg
);
845 store_dest_reg(gen
, d_reg
, ch
, &inst
->Dst
[0]);
853 * Emit homogeneous dot product. See emit_ADD for comments.
856 emit_DPH(struct codegen
*gen
, const struct tgsi_full_instruction
*inst
)
858 /* XXX rewrite this function to look more like DP3/DP4 */
860 int s1_reg
= get_src_reg(gen
, CHAN_X
, &inst
->Src
[0]);
861 int s2_reg
= get_src_reg(gen
, CHAN_X
, &inst
->Src
[1]);
862 int tmp_reg
= get_itemp(gen
);
865 spe_fm(gen
->f
, tmp_reg
, s1_reg
, s2_reg
);
867 s1_reg
= get_src_reg(gen
, CHAN_Y
, &inst
->Src
[0]);
868 s2_reg
= get_src_reg(gen
, CHAN_Y
, &inst
->Src
[1]);
869 /* t = y0 * y1 + t */
870 spe_fma(gen
->f
, tmp_reg
, s1_reg
, s2_reg
, tmp_reg
);
872 s1_reg
= get_src_reg(gen
, CHAN_Z
, &inst
->Src
[0]);
873 s2_reg
= get_src_reg(gen
, CHAN_Z
, &inst
->Src
[1]);
874 /* t = z0 * z1 + t */
875 spe_fma(gen
->f
, tmp_reg
, s1_reg
, s2_reg
, tmp_reg
);
877 s2_reg
= get_src_reg(gen
, CHAN_W
, &inst
->Src
[1]);
879 spe_fa(gen
->f
, tmp_reg
, s2_reg
, tmp_reg
);
881 FOR_EACH_ENABLED_CHANNEL(inst
, ch
) {
882 int d_reg
= get_dst_reg(gen
, ch
, &inst
->Dst
[0]);
883 spe_move(gen
->f
, d_reg
, tmp_reg
);
884 store_dest_reg(gen
, tmp_reg
, ch
, &inst
->Dst
[0]);
892 * Emit 3-component vector normalize.
895 emit_NRM3(struct codegen
*gen
, const struct tgsi_full_instruction
*inst
)
899 int t0_reg
= get_itemp(gen
), t1_reg
= get_itemp(gen
);
901 src_reg
[0] = get_src_reg(gen
, CHAN_X
, &inst
->Src
[0]);
902 src_reg
[1] = get_src_reg(gen
, CHAN_Y
, &inst
->Src
[0]);
903 src_reg
[2] = get_src_reg(gen
, CHAN_Z
, &inst
->Src
[0]);
906 spe_fm(gen
->f
, t0_reg
, src_reg
[0], src_reg
[0]);
909 spe_fm(gen
->f
, t1_reg
, src_reg
[1], src_reg
[1]);
911 /* t0 = z * z + t0 */
912 spe_fma(gen
->f
, t0_reg
, src_reg
[2], src_reg
[2], t0_reg
);
915 spe_fa(gen
->f
, t0_reg
, t0_reg
, t1_reg
);
917 /* t1 = 1.0 / sqrt(t0) */
918 spe_frsqest(gen
->f
, t1_reg
, t0_reg
);
919 spe_fi(gen
->f
, t1_reg
, t0_reg
, t1_reg
);
921 FOR_EACH_ENABLED_CHANNEL(inst
, ch
) {
922 int d_reg
= get_dst_reg(gen
, ch
, &inst
->Dst
[0]);
923 /* dst = src[ch] * t1 */
924 spe_fm(gen
->f
, d_reg
, src_reg
[ch
], t1_reg
);
925 store_dest_reg(gen
, d_reg
, ch
, &inst
->Dst
[0]);
934 * Emit cross product. See emit_ADD for comments.
937 emit_XPD(struct codegen
*gen
, const struct tgsi_full_instruction
*inst
)
939 int s1_reg
= get_src_reg(gen
, CHAN_Z
, &inst
->Src
[0]);
940 int s2_reg
= get_src_reg(gen
, CHAN_Y
, &inst
->Src
[1]);
941 int tmp_reg
= get_itemp(gen
);
944 spe_fm(gen
->f
, tmp_reg
, s1_reg
, s2_reg
);
946 s1_reg
= get_src_reg(gen
, CHAN_Y
, &inst
->Src
[0]);
947 s2_reg
= get_src_reg(gen
, CHAN_Z
, &inst
->Src
[1]);
948 /* t = y0 * z1 - t */
949 spe_fms(gen
->f
, tmp_reg
, s1_reg
, s2_reg
, tmp_reg
);
951 if (inst
->Dst
[0].Register
.WriteMask
& (1 << CHAN_X
)) {
952 store_dest_reg(gen
, tmp_reg
, CHAN_X
, &inst
->Dst
[0]);
955 s1_reg
= get_src_reg(gen
, CHAN_X
, &inst
->Src
[0]);
956 s2_reg
= get_src_reg(gen
, CHAN_Z
, &inst
->Src
[1]);
958 spe_fm(gen
->f
, tmp_reg
, s1_reg
, s2_reg
);
960 s1_reg
= get_src_reg(gen
, CHAN_Z
, &inst
->Src
[0]);
961 s2_reg
= get_src_reg(gen
, CHAN_X
, &inst
->Src
[1]);
962 /* t = z0 * x1 - t */
963 spe_fms(gen
->f
, tmp_reg
, s1_reg
, s2_reg
, tmp_reg
);
965 if (inst
->Dst
[0].Register
.WriteMask
& (1 << CHAN_Y
)) {
966 store_dest_reg(gen
, tmp_reg
, CHAN_Y
, &inst
->Dst
[0]);
969 s1_reg
= get_src_reg(gen
, CHAN_Y
, &inst
->Src
[0]);
970 s2_reg
= get_src_reg(gen
, CHAN_X
, &inst
->Src
[1]);
972 spe_fm(gen
->f
, tmp_reg
, s1_reg
, s2_reg
);
974 s1_reg
= get_src_reg(gen
, CHAN_X
, &inst
->Src
[0]);
975 s2_reg
= get_src_reg(gen
, CHAN_Y
, &inst
->Src
[1]);
976 /* t = x0 * y1 - t */
977 spe_fms(gen
->f
, tmp_reg
, s1_reg
, s2_reg
, tmp_reg
);
979 if (inst
->Dst
[0].Register
.WriteMask
& (1 << CHAN_Z
)) {
980 store_dest_reg(gen
, tmp_reg
, CHAN_Z
, &inst
->Dst
[0]);
989 * Emit inequality instruction.
990 * Note that the SPE fcgt instruction produces 0x0 and 0xffffffff as
991 * the result but OpenGL/TGSI needs 0.0 and 1.0 results.
992 * We can easily convert 0x0/0xffffffff to 0.0/1.0 with a bitwise AND.
995 emit_inequality(struct codegen
*gen
, const struct tgsi_full_instruction
*inst
)
997 int ch
, s1_reg
[4], s2_reg
[4], d_reg
[4], one_reg
;
998 boolean complement
= FALSE
;
1000 one_reg
= get_const_one_reg(gen
);
1002 FOR_EACH_ENABLED_CHANNEL(inst
, ch
) {
1003 s1_reg
[ch
] = get_src_reg(gen
, ch
, &inst
->Src
[0]);
1004 s2_reg
[ch
] = get_src_reg(gen
, ch
, &inst
->Src
[1]);
1005 d_reg
[ch
] = get_dst_reg(gen
, ch
, &inst
->Dst
[0]);
1008 FOR_EACH_ENABLED_CHANNEL(inst
, ch
) {
1009 switch (inst
->Instruction
.Opcode
) {
1010 case TGSI_OPCODE_SGT
:
1011 spe_fcgt(gen
->f
, d_reg
[ch
], s1_reg
[ch
], s2_reg
[ch
]);
1013 case TGSI_OPCODE_SLT
:
1014 spe_fcgt(gen
->f
, d_reg
[ch
], s2_reg
[ch
], s1_reg
[ch
]);
1016 case TGSI_OPCODE_SGE
:
1017 spe_fcgt(gen
->f
, d_reg
[ch
], s2_reg
[ch
], s1_reg
[ch
]);
1020 case TGSI_OPCODE_SLE
:
1021 spe_fcgt(gen
->f
, d_reg
[ch
], s1_reg
[ch
], s2_reg
[ch
]);
1024 case TGSI_OPCODE_SEQ
:
1025 spe_fceq(gen
->f
, d_reg
[ch
], s1_reg
[ch
], s2_reg
[ch
]);
1027 case TGSI_OPCODE_SNE
:
1028 spe_fceq(gen
->f
, d_reg
[ch
], s1_reg
[ch
], s2_reg
[ch
]);
1036 /* convert d from 0x0/0xffffffff to 0.0/1.0 */
1037 FOR_EACH_ENABLED_CHANNEL(inst
, ch
) {
1038 /* d = d & one_reg */
1040 spe_andc(gen
->f
, d_reg
[ch
], one_reg
, d_reg
[ch
]);
1042 spe_and(gen
->f
, d_reg
[ch
], one_reg
, d_reg
[ch
]);
1045 FOR_EACH_ENABLED_CHANNEL(inst
, ch
) {
1046 store_dest_reg(gen
, d_reg
[ch
], ch
, &inst
->Dst
[0]);
1058 emit_CMP(struct codegen
*gen
, const struct tgsi_full_instruction
*inst
)
1062 FOR_EACH_ENABLED_CHANNEL(inst
, ch
) {
1063 int s1_reg
= get_src_reg(gen
, ch
, &inst
->Src
[0]);
1064 int s2_reg
= get_src_reg(gen
, ch
, &inst
->Src
[1]);
1065 int s3_reg
= get_src_reg(gen
, ch
, &inst
->Src
[2]);
1066 int d_reg
= get_dst_reg(gen
, ch
, &inst
->Dst
[0]);
1067 int zero_reg
= get_itemp(gen
);
1069 spe_zero(gen
->f
, zero_reg
);
1071 /* d = (s1 < 0) ? s2 : s3 */
1072 spe_fcgt(gen
->f
, d_reg
, zero_reg
, s1_reg
);
1073 spe_selb(gen
->f
, d_reg
, s3_reg
, s2_reg
, d_reg
);
1075 store_dest_reg(gen
, d_reg
, ch
, &inst
->Dst
[0]);
1084 * Convert float to signed int
1085 * Convert signed int to float
1088 emit_TRUNC(struct codegen
*gen
, const struct tgsi_full_instruction
*inst
)
1090 int ch
, s1_reg
[4], d_reg
[4];
1092 FOR_EACH_ENABLED_CHANNEL(inst
, ch
) {
1093 s1_reg
[ch
] = get_src_reg(gen
, ch
, &inst
->Src
[0]);
1094 d_reg
[ch
] = get_dst_reg(gen
, ch
, &inst
->Dst
[0]);
1097 /* Convert float to int */
1098 FOR_EACH_ENABLED_CHANNEL(inst
, ch
) {
1099 spe_cflts(gen
->f
, d_reg
[ch
], s1_reg
[ch
], 0);
1102 /* Convert int to float */
1103 FOR_EACH_ENABLED_CHANNEL(inst
, ch
) {
1104 spe_csflt(gen
->f
, d_reg
[ch
], d_reg
[ch
], 0);
1107 FOR_EACH_ENABLED_CHANNEL(inst
, ch
) {
1108 store_dest_reg(gen
, d_reg
[ch
], ch
, &inst
->Dst
[0]);
1118 * If negative int subtract one
1119 * Convert float to signed int
1120 * Convert signed int to float
1123 emit_FLR(struct codegen
*gen
, const struct tgsi_full_instruction
*inst
)
1125 int ch
, s1_reg
[4], d_reg
[4], tmp_reg
[4], zero_reg
, one_reg
;
1127 zero_reg
= get_itemp(gen
);
1128 spe_zero(gen
->f
, zero_reg
);
1129 one_reg
= get_const_one_reg(gen
);
1131 FOR_EACH_ENABLED_CHANNEL(inst
, ch
) {
1132 s1_reg
[ch
] = get_src_reg(gen
, ch
, &inst
->Src
[0]);
1133 d_reg
[ch
] = get_dst_reg(gen
, ch
, &inst
->Dst
[0]);
1134 tmp_reg
[ch
] = get_itemp(gen
);
1137 /* If negative, subtract 1.0 */
1138 FOR_EACH_ENABLED_CHANNEL(inst
, ch
) {
1139 spe_fcgt(gen
->f
, tmp_reg
[ch
], zero_reg
, s1_reg
[ch
]);
1141 FOR_EACH_ENABLED_CHANNEL(inst
, ch
) {
1142 spe_selb(gen
->f
, tmp_reg
[ch
], zero_reg
, one_reg
, tmp_reg
[ch
]);
1144 FOR_EACH_ENABLED_CHANNEL(inst
, ch
) {
1145 spe_fs(gen
->f
, tmp_reg
[ch
], s1_reg
[ch
], tmp_reg
[ch
]);
1148 /* Convert float to int */
1149 FOR_EACH_ENABLED_CHANNEL(inst
, ch
) {
1150 spe_cflts(gen
->f
, tmp_reg
[ch
], tmp_reg
[ch
], 0);
1153 /* Convert int to float */
1154 FOR_EACH_ENABLED_CHANNEL(inst
, ch
) {
1155 spe_csflt(gen
->f
, d_reg
[ch
], tmp_reg
[ch
], 0);
1158 FOR_EACH_ENABLED_CHANNEL(inst
, ch
) {
1159 store_dest_reg(gen
, d_reg
[ch
], ch
, &inst
->Dst
[0]);
1168 * Compute frac = Input - FLR(Input)
1171 emit_FRC(struct codegen
*gen
, const struct tgsi_full_instruction
*inst
)
1173 int ch
, s1_reg
[4], d_reg
[4], tmp_reg
[4], zero_reg
, one_reg
;
1175 zero_reg
= get_itemp(gen
);
1176 spe_zero(gen
->f
, zero_reg
);
1177 one_reg
= get_const_one_reg(gen
);
1179 FOR_EACH_ENABLED_CHANNEL(inst
, ch
) {
1180 s1_reg
[ch
] = get_src_reg(gen
, ch
, &inst
->Src
[0]);
1181 d_reg
[ch
] = get_dst_reg(gen
, ch
, &inst
->Dst
[0]);
1182 tmp_reg
[ch
] = get_itemp(gen
);
1185 /* If negative, subtract 1.0 */
1186 FOR_EACH_ENABLED_CHANNEL(inst
, ch
) {
1187 spe_fcgt(gen
->f
, tmp_reg
[ch
], zero_reg
, s1_reg
[ch
]);
1189 FOR_EACH_ENABLED_CHANNEL(inst
, ch
) {
1190 spe_selb(gen
->f
, tmp_reg
[ch
], zero_reg
, one_reg
, tmp_reg
[ch
]);
1192 FOR_EACH_ENABLED_CHANNEL(inst
, ch
) {
1193 spe_fs(gen
->f
, tmp_reg
[ch
], s1_reg
[ch
], tmp_reg
[ch
]);
1196 /* Convert float to int */
1197 FOR_EACH_ENABLED_CHANNEL(inst
, ch
) {
1198 spe_cflts(gen
->f
, tmp_reg
[ch
], tmp_reg
[ch
], 0);
1201 /* Convert int to float */
1202 FOR_EACH_ENABLED_CHANNEL(inst
, ch
) {
1203 spe_csflt(gen
->f
, tmp_reg
[ch
], tmp_reg
[ch
], 0);
1206 /* d = s1 - FLR(s1) */
1207 FOR_EACH_ENABLED_CHANNEL(inst
, ch
) {
1208 spe_fs(gen
->f
, d_reg
[ch
], s1_reg
[ch
], tmp_reg
[ch
]);
1212 FOR_EACH_ENABLED_CHANNEL(inst
, ch
) {
1213 store_dest_reg(gen
, d_reg
[ch
], ch
, &inst
->Dst
[0]);
1223 print_functions(struct cell_context
*cell
)
1225 struct cell_spu_function_info
*funcs
= &cell
->spu_functions
;
1227 for (i
= 0; i
< funcs
->num
; i
++) {
1228 printf("SPU func %u: %s at %u\n",
1229 i
, funcs
->names
[i
], funcs
->addrs
[i
]);
1236 lookup_function(struct cell_context
*cell
, const char *funcname
)
1238 const struct cell_spu_function_info
*funcs
= &cell
->spu_functions
;
1240 for (i
= 0; i
< funcs
->num
; i
++) {
1241 if (strcmp(funcs
->names
[i
], funcname
) == 0) {
1242 addr
= funcs
->addrs
[i
];
1245 assert(addr
&& "spu function not found");
1246 return addr
/ 4; /* discard 2 least significant bits */
1251 * Emit code to call a SPU function.
1252 * Used to implement instructions like SIN/COS/POW/TEX/etc.
1253 * If scalar, only the X components of the src regs are used, and the
1254 * result is replicated across the dest register's XYZW components.
1257 emit_function_call(struct codegen
*gen
,
1258 const struct tgsi_full_instruction
*inst
,
1259 char *funcname
, uint num_args
, boolean scalar
)
1261 const uint addr
= lookup_function(gen
->cell
, funcname
);
1264 int func_called
= FALSE
;
1266 int retval_reg
= -1;
1268 assert(num_args
<= 3);
1270 snprintf(comment
, sizeof(comment
), "CALL %s:", funcname
);
1271 spe_comment(gen
->f
, -4, comment
);
1274 for (a
= 0; a
< num_args
; a
++) {
1275 s_regs
[a
] = get_src_reg(gen
, CHAN_X
, &inst
->Src
[a
]);
1277 /* we'll call the function, put the return value in this register,
1278 * then replicate it across all write-enabled components in d_reg.
1280 retval_reg
= spe_allocate_available_register(gen
->f
);
1283 FOR_EACH_ENABLED_CHANNEL(inst
, ch
) {
1285 ubyte usedRegs
[SPE_NUM_REGS
];
1289 for (a
= 0; a
< num_args
; a
++) {
1290 s_regs
[a
] = get_src_reg(gen
, ch
, &inst
->Src
[a
]);
1294 d_reg
= get_dst_reg(gen
, ch
, &inst
->Dst
[0]);
1296 if (!scalar
|| !func_called
) {
1297 /* for a scalar function, we'll really only call the function once */
1299 numUsed
= spe_get_registers_used(gen
->f
, usedRegs
);
1300 assert(numUsed
< gen
->frame_size
/ 16 - 2);
1302 /* save registers to stack */
1303 for (i
= 0; i
< numUsed
; i
++) {
1304 uint reg
= usedRegs
[i
];
1306 spe_stqd(gen
->f
, reg
, SPE_REG_SP
, 16 * offset
);
1309 /* setup function arguments */
1310 for (a
= 0; a
< num_args
; a
++) {
1311 spe_move(gen
->f
, 3 + a
, s_regs
[a
]);
1314 /* branch to function, save return addr */
1315 spe_brasl(gen
->f
, SPE_REG_RA
, addr
);
1317 /* save function's return value */
1319 spe_move(gen
->f
, retval_reg
, 3);
1321 spe_move(gen
->f
, d_reg
, 3);
1323 /* restore registers from stack */
1324 for (i
= 0; i
< numUsed
; i
++) {
1325 uint reg
= usedRegs
[i
];
1326 if (reg
!= d_reg
&& reg
!= retval_reg
) {
1328 spe_lqd(gen
->f
, reg
, SPE_REG_SP
, 16 * offset
);
1336 spe_move(gen
->f
, d_reg
, retval_reg
);
1339 store_dest_reg(gen
, d_reg
, ch
, &inst
->Dst
[0]);
1344 spe_release_register(gen
->f
, retval_reg
);
1352 emit_TEX(struct codegen
*gen
, const struct tgsi_full_instruction
*inst
)
1354 const uint target
= inst
->Texture
.Texture
;
1355 const uint unit
= inst
->Src
[1].Register
.Index
;
1358 int coord_regs
[4], d_regs
[4];
1361 case TGSI_TEXTURE_1D
:
1362 case TGSI_TEXTURE_2D
:
1363 addr
= lookup_function(gen
->cell
, "spu_tex_2d");
1365 case TGSI_TEXTURE_3D
:
1366 addr
= lookup_function(gen
->cell
, "spu_tex_3d");
1368 case TGSI_TEXTURE_CUBE
:
1369 addr
= lookup_function(gen
->cell
, "spu_tex_cube");
1372 ASSERT(0 && "unsupported texture target");
1376 assert(inst
->Src
[1].Register
.File
== TGSI_FILE_SAMPLER
);
1378 spe_comment(gen
->f
, -4, "CALL tex:");
1380 /* get src/dst reg info */
1381 for (ch
= 0; ch
< 4; ch
++) {
1382 coord_regs
[ch
] = get_src_reg(gen
, ch
, &inst
->Src
[0]);
1383 d_regs
[ch
] = get_dst_reg(gen
, ch
, &inst
->Dst
[0]);
1387 ubyte usedRegs
[SPE_NUM_REGS
];
1390 numUsed
= spe_get_registers_used(gen
->f
, usedRegs
);
1391 assert(numUsed
< gen
->frame_size
/ 16 - 2);
1393 /* save registers to stack */
1394 for (i
= 0; i
< numUsed
; i
++) {
1395 uint reg
= usedRegs
[i
];
1397 spe_stqd(gen
->f
, reg
, SPE_REG_SP
, 16 * offset
);
1400 /* setup function arguments (XXX depends on target) */
1401 for (i
= 0; i
< 4; i
++) {
1402 spe_move(gen
->f
, 3 + i
, coord_regs
[i
]);
1404 spe_load_uint(gen
->f
, 7, unit
); /* sampler unit */
1406 /* branch to function, save return addr */
1407 spe_brasl(gen
->f
, SPE_REG_RA
, addr
);
1409 /* save function's return values (four pixel's colors) */
1410 for (i
= 0; i
< 4; i
++) {
1411 spe_move(gen
->f
, d_regs
[i
], 3 + i
);
1414 /* restore registers from stack */
1415 for (i
= 0; i
< numUsed
; i
++) {
1416 uint reg
= usedRegs
[i
];
1417 if (reg
!= d_regs
[0] &&
1422 spe_lqd(gen
->f
, reg
, SPE_REG_SP
, 16 * offset
);
1427 FOR_EACH_ENABLED_CHANNEL(inst
, ch
) {
1428 store_dest_reg(gen
, d_regs
[ch
], ch
, &inst
->Dst
[0]);
1437 * KILL if any of src reg values are less than zero.
1440 emit_KIL(struct codegen
*gen
, const struct tgsi_full_instruction
*inst
)
1443 int s_regs
[4], kil_reg
= -1, cmp_reg
, zero_reg
;
1445 spe_comment(gen
->f
, -4, "CALL kil:");
1447 /* zero = {0,0,0,0} */
1448 zero_reg
= get_itemp(gen
);
1449 spe_zero(gen
->f
, zero_reg
);
1451 cmp_reg
= get_itemp(gen
);
1454 FOR_EACH_ENABLED_CHANNEL(inst
, ch
) {
1455 s_regs
[ch
] = get_src_reg(gen
, ch
, &inst
->Src
[0]);
1458 /* test if any src regs are < 0 */
1459 FOR_EACH_ENABLED_CHANNEL(inst
, ch
) {
1461 /* cmp = 0 > src ? : ~0 : 0 */
1462 spe_fcgt(gen
->f
, cmp_reg
, zero_reg
, s_regs
[ch
]);
1463 /* kil = kil | cmp */
1464 spe_or(gen
->f
, kil_reg
, kil_reg
, cmp_reg
);
1467 kil_reg
= get_itemp(gen
);
1468 /* kil = 0 > src ? : ~0 : 0 */
1469 spe_fcgt(gen
->f
, kil_reg
, zero_reg
, s_regs
[ch
]);
1473 if (gen
->if_nesting
|| gen
->loop_nesting
) {
1474 /* may have been a conditional kil */
1475 spe_and(gen
->f
, kil_reg
, kil_reg
, gen
->exec_mask_reg
);
1478 /* allocate the kill mask reg if needed */
1479 if (gen
->kill_mask_reg
<= 0) {
1480 gen
->kill_mask_reg
= spe_allocate_available_register(gen
->f
);
1481 spe_move(gen
->f
, gen
->kill_mask_reg
, kil_reg
);
1484 spe_or(gen
->f
, gen
->kill_mask_reg
, gen
->kill_mask_reg
, kil_reg
);
1498 emit_MIN_MAX(struct codegen
*gen
, const struct tgsi_full_instruction
*inst
)
1500 int ch
, s0_reg
[4], s1_reg
[4], d_reg
[4], tmp_reg
[4];
1502 FOR_EACH_ENABLED_CHANNEL(inst
, ch
) {
1503 s0_reg
[ch
] = get_src_reg(gen
, ch
, &inst
->Src
[0]);
1504 s1_reg
[ch
] = get_src_reg(gen
, ch
, &inst
->Src
[1]);
1505 d_reg
[ch
] = get_dst_reg(gen
, ch
, &inst
->Dst
[0]);
1506 tmp_reg
[ch
] = get_itemp(gen
);
1509 /* d = (s0 > s1) ? s0 : s1 */
1510 FOR_EACH_ENABLED_CHANNEL(inst
, ch
) {
1511 if (inst
->Instruction
.Opcode
== TGSI_OPCODE_MAX
)
1512 spe_fcgt(gen
->f
, tmp_reg
[ch
], s0_reg
[ch
], s1_reg
[ch
]);
1514 spe_fcgt(gen
->f
, tmp_reg
[ch
], s1_reg
[ch
], s0_reg
[ch
]);
1516 FOR_EACH_ENABLED_CHANNEL(inst
, ch
) {
1517 spe_selb(gen
->f
, d_reg
[ch
], s1_reg
[ch
], s0_reg
[ch
], tmp_reg
[ch
]);
1520 FOR_EACH_ENABLED_CHANNEL(inst
, ch
) {
1521 store_dest_reg(gen
, d_reg
[ch
], ch
, &inst
->Dst
[0]);
1530 * Emit code to update the execution mask.
1531 * This needs to be done whenever the execution status of a conditional
1532 * or loop is changed.
1535 emit_update_exec_mask(struct codegen
*gen
)
1537 const int exec_reg
= get_exec_mask_reg(gen
);
1538 const int cond_reg
= gen
->cond_mask_reg
;
1539 const int loop_reg
= gen
->loop_mask_reg
;
1541 spe_comment(gen
->f
, 0, "Update master execution mask");
1543 if (gen
->if_nesting
> 0 && gen
->loop_nesting
> 0) {
1544 /* exec_mask = cond_mask & loop_mask */
1545 assert(cond_reg
> 0);
1546 assert(loop_reg
> 0);
1547 spe_and(gen
->f
, exec_reg
, cond_reg
, loop_reg
);
1549 else if (gen
->if_nesting
> 0) {
1550 assert(cond_reg
> 0);
1551 spe_move(gen
->f
, exec_reg
, cond_reg
);
1553 else if (gen
->loop_nesting
> 0) {
1554 assert(loop_reg
> 0);
1555 spe_move(gen
->f
, exec_reg
, loop_reg
);
1558 spe_load_int(gen
->f
, exec_reg
, ~0x0);
1564 emit_IF(struct codegen
*gen
, const struct tgsi_full_instruction
*inst
)
1566 const int channel
= 0;
1569 cond_reg
= get_cond_mask_reg(gen
);
1571 /* XXX push cond exec mask */
1573 spe_comment(gen
->f
, 0, "init conditional exec mask = ~0:");
1574 spe_load_int(gen
->f
, cond_reg
, ~0);
1576 /* update conditional execution mask with the predicate register */
1577 int tmp_reg
= get_itemp(gen
);
1578 int s1_reg
= get_src_reg(gen
, channel
, &inst
->Src
[0]);
1580 /* tmp = (s1_reg == 0) */
1581 spe_ceqi(gen
->f
, tmp_reg
, s1_reg
, 0);
1583 spe_complement(gen
->f
, tmp_reg
, tmp_reg
);
1584 /* cond_mask = cond_mask & tmp */
1585 spe_and(gen
->f
, cond_reg
, cond_reg
, tmp_reg
);
1589 /* update the master execution mask */
1590 emit_update_exec_mask(gen
);
1599 emit_ELSE(struct codegen
*gen
, const struct tgsi_full_instruction
*inst
)
1601 const int cond_reg
= get_cond_mask_reg(gen
);
1603 spe_comment(gen
->f
, 0, "cond exec mask = !cond exec mask");
1604 spe_complement(gen
->f
, cond_reg
, cond_reg
);
1605 emit_update_exec_mask(gen
);
1612 emit_ENDIF(struct codegen
*gen
, const struct tgsi_full_instruction
*inst
)
1614 /* XXX todo: pop cond exec mask */
1618 emit_update_exec_mask(gen
);
1625 emit_BGNLOOP(struct codegen
*gen
, const struct tgsi_full_instruction
*inst
)
1627 int exec_reg
, loop_reg
;
1629 exec_reg
= get_exec_mask_reg(gen
);
1630 loop_reg
= get_loop_mask_reg(gen
);
1632 /* XXX push loop_exec mask */
1634 spe_comment(gen
->f
, 0*-4, "initialize loop exec mask = ~0");
1635 spe_load_int(gen
->f
, loop_reg
, ~0x0);
1637 gen
->loop_nesting
++;
1638 gen
->loop_start
= spe_code_size(gen
->f
); /* in bytes */
1645 emit_ENDLOOP(struct codegen
*gen
, const struct tgsi_full_instruction
*inst
)
1647 const int loop_reg
= get_loop_mask_reg(gen
);
1648 const int tmp_reg
= get_itemp(gen
);
1651 /* tmp_reg = exec[0] | exec[1] | exec[2] | exec[3] */
1652 spe_orx(gen
->f
, tmp_reg
, loop_reg
);
1654 offset
= gen
->loop_start
- spe_code_size(gen
->f
); /* in bytes */
1656 /* branch back to top of loop if tmp_reg != 0 */
1657 spe_brnz(gen
->f
, tmp_reg
, offset
/ 4);
1659 /* XXX pop loop_exec mask */
1661 gen
->loop_nesting
--;
1663 emit_update_exec_mask(gen
);
1670 emit_BRK(struct codegen
*gen
, const struct tgsi_full_instruction
*inst
)
1672 const int exec_reg
= get_exec_mask_reg(gen
);
1673 const int loop_reg
= get_loop_mask_reg(gen
);
1675 assert(gen
->loop_nesting
> 0);
1677 spe_comment(gen
->f
, 0, "loop exec mask &= ~master exec mask");
1678 spe_andc(gen
->f
, loop_reg
, loop_reg
, exec_reg
);
1680 emit_update_exec_mask(gen
);
1687 emit_CONT(struct codegen
*gen
, const struct tgsi_full_instruction
*inst
)
1689 assert(gen
->loop_nesting
> 0);
1696 emit_DDX_DDY(struct codegen
*gen
, const struct tgsi_full_instruction
*inst
,
1701 FOR_EACH_ENABLED_CHANNEL(inst
, ch
) {
1702 int s_reg
= get_src_reg(gen
, ch
, &inst
->Src
[0]);
1703 int d_reg
= get_dst_reg(gen
, ch
, &inst
->Dst
[0]);
1705 int t1_reg
= get_itemp(gen
);
1706 int t2_reg
= get_itemp(gen
);
1708 spe_splat_word(gen
->f
, t1_reg
, s_reg
, 0); /* upper-left pixel */
1710 spe_splat_word(gen
->f
, t2_reg
, s_reg
, 1); /* upper-right pixel */
1713 spe_splat_word(gen
->f
, t2_reg
, s_reg
, 2); /* lower-left pixel */
1715 spe_fs(gen
->f
, d_reg
, t2_reg
, t1_reg
);
1727 * Emit END instruction.
1728 * We just return from the shader function at this point.
1730 * Note that there may be more code after this that would be
1731 * called by TGSI_OPCODE_CALL.
1734 emit_END(struct codegen
*gen
)
1742 * Emit code for the given instruction. Just a big switch stmt.
1745 emit_instruction(struct codegen
*gen
,
1746 const struct tgsi_full_instruction
*inst
)
1748 switch (inst
->Instruction
.Opcode
) {
1749 case TGSI_OPCODE_ARL
:
1750 return emit_ARL(gen
, inst
);
1751 case TGSI_OPCODE_MOV
:
1752 return emit_MOV(gen
, inst
);
1753 case TGSI_OPCODE_ADD
:
1754 case TGSI_OPCODE_SUB
:
1755 case TGSI_OPCODE_MUL
:
1756 return emit_binop(gen
, inst
);
1757 case TGSI_OPCODE_MAD
:
1758 return emit_MAD(gen
, inst
);
1759 case TGSI_OPCODE_LRP
:
1760 return emit_LRP(gen
, inst
);
1761 case TGSI_OPCODE_DP3
:
1762 return emit_DP3(gen
, inst
);
1763 case TGSI_OPCODE_DP4
:
1764 return emit_DP4(gen
, inst
);
1765 case TGSI_OPCODE_DPH
:
1766 return emit_DPH(gen
, inst
);
1767 case TGSI_OPCODE_NRM
:
1768 return emit_NRM3(gen
, inst
);
1769 case TGSI_OPCODE_XPD
:
1770 return emit_XPD(gen
, inst
);
1771 case TGSI_OPCODE_RCP
:
1772 case TGSI_OPCODE_RSQ
:
1773 return emit_RCP_RSQ(gen
, inst
);
1774 case TGSI_OPCODE_ABS
:
1775 return emit_ABS(gen
, inst
);
1776 case TGSI_OPCODE_SGT
:
1777 case TGSI_OPCODE_SLT
:
1778 case TGSI_OPCODE_SGE
:
1779 case TGSI_OPCODE_SLE
:
1780 case TGSI_OPCODE_SEQ
:
1781 case TGSI_OPCODE_SNE
:
1782 return emit_inequality(gen
, inst
);
1783 case TGSI_OPCODE_CMP
:
1784 return emit_CMP(gen
, inst
);
1785 case TGSI_OPCODE_MIN
:
1786 case TGSI_OPCODE_MAX
:
1787 return emit_MIN_MAX(gen
, inst
);
1788 case TGSI_OPCODE_TRUNC
:
1789 return emit_TRUNC(gen
, inst
);
1790 case TGSI_OPCODE_FLR
:
1791 return emit_FLR(gen
, inst
);
1792 case TGSI_OPCODE_FRC
:
1793 return emit_FRC(gen
, inst
);
1794 case TGSI_OPCODE_END
:
1795 return emit_END(gen
);
1797 case TGSI_OPCODE_COS
:
1798 return emit_function_call(gen
, inst
, "spu_cos", 1, TRUE
);
1799 case TGSI_OPCODE_SIN
:
1800 return emit_function_call(gen
, inst
, "spu_sin", 1, TRUE
);
1801 case TGSI_OPCODE_POW
:
1802 return emit_function_call(gen
, inst
, "spu_pow", 2, TRUE
);
1803 case TGSI_OPCODE_EX2
:
1804 return emit_function_call(gen
, inst
, "spu_exp2", 1, TRUE
);
1805 case TGSI_OPCODE_LG2
:
1806 return emit_function_call(gen
, inst
, "spu_log2", 1, TRUE
);
1807 case TGSI_OPCODE_TEX
:
1808 /* fall-through for now */
1809 case TGSI_OPCODE_TXD
:
1810 /* fall-through for now */
1811 case TGSI_OPCODE_TXB
:
1812 /* fall-through for now */
1813 case TGSI_OPCODE_TXL
:
1814 /* fall-through for now */
1815 case TGSI_OPCODE_TXP
:
1816 return emit_TEX(gen
, inst
);
1817 case TGSI_OPCODE_KIL
:
1818 return emit_KIL(gen
, inst
);
1820 case TGSI_OPCODE_IF
:
1821 return emit_IF(gen
, inst
);
1822 case TGSI_OPCODE_ELSE
:
1823 return emit_ELSE(gen
, inst
);
1824 case TGSI_OPCODE_ENDIF
:
1825 return emit_ENDIF(gen
, inst
);
1827 case TGSI_OPCODE_BGNLOOP
:
1828 return emit_BGNLOOP(gen
, inst
);
1829 case TGSI_OPCODE_ENDLOOP
:
1830 return emit_ENDLOOP(gen
, inst
);
1831 case TGSI_OPCODE_BRK
:
1832 return emit_BRK(gen
, inst
);
1833 case TGSI_OPCODE_CONT
:
1834 return emit_CONT(gen
, inst
);
1836 case TGSI_OPCODE_DDX
:
1837 return emit_DDX_DDY(gen
, inst
, TRUE
);
1838 case TGSI_OPCODE_DDY
:
1839 return emit_DDX_DDY(gen
, inst
, FALSE
);
1841 /* XXX lots more cases to do... */
1844 fprintf(stderr
, "Cell: unimplemented TGSI instruction %d!\n",
1845 inst
->Instruction
.Opcode
);
1855 * Emit code for a TGSI immediate value (vector of four floats).
1856 * This involves register allocation and initialization.
1857 * XXX the initialization should be done by a "prepare" stage, not
1858 * per quad execution!
1861 emit_immediate(struct codegen
*gen
, const struct tgsi_full_immediate
*immed
)
1865 assert(gen
->num_imm
< MAX_TEMPS
);
1867 for (ch
= 0; ch
< 4; ch
++) {
1868 float val
= immed
->u
[ch
].Float
;
1870 if (ch
> 0 && val
== immed
->u
[ch
- 1].Float
) {
1871 /* re-use previous register */
1872 gen
->imm_regs
[gen
->num_imm
][ch
] = gen
->imm_regs
[gen
->num_imm
][ch
- 1];
1876 int reg
= spe_allocate_available_register(gen
->f
);
1881 sprintf(str
, "init $%d = %f", reg
, val
);
1882 spe_comment(gen
->f
, 0, str
);
1884 /* update immediate map */
1885 gen
->imm_regs
[gen
->num_imm
][ch
] = reg
;
1887 /* emit initializer instruction */
1888 spe_load_float(gen
->f
, reg
, val
);
1900 * Emit "code" for a TGSI declaration.
1901 * We only care about TGSI TEMPORARY register declarations at this time.
1902 * For each TGSI TEMPORARY we allocate four SPE registers.
1905 emit_declaration(struct cell_context
*cell
,
1906 struct codegen
*gen
, const struct tgsi_full_declaration
*decl
)
1910 switch (decl
->Declaration
.File
) {
1911 case TGSI_FILE_TEMPORARY
:
1912 for (i
= decl
->Range
.First
;
1913 i
<= decl
->Range
.Last
;
1915 assert(i
< MAX_TEMPS
);
1916 for (ch
= 0; ch
< 4; ch
++) {
1917 gen
->temp_regs
[i
][ch
] = spe_allocate_available_register(gen
->f
);
1918 if (gen
->temp_regs
[i
][ch
] < 0)
1919 return FALSE
; /* out of regs */
1922 /* XXX if we run out of SPE registers, we need to spill
1923 * to SPU memory. someday...
1928 sprintf(buf
, "TGSI temp[%d] maps to SPU regs [$%d $%d $%d $%d]", i
,
1929 gen
->temp_regs
[i
][0], gen
->temp_regs
[i
][1],
1930 gen
->temp_regs
[i
][2], gen
->temp_regs
[i
][3]);
1931 spe_comment(gen
->f
, 0, buf
);
1945 * Translate TGSI shader code to SPE instructions. This is done when
1946 * the state tracker gives us a new shader (via pipe->create_fs_state()).
1948 * \param cell the rendering context (in)
1949 * \param tokens the TGSI shader (in)
1950 * \param f the generated function (out)
1953 cell_gen_fragment_program(struct cell_context
*cell
,
1954 const struct tgsi_token
*tokens
,
1955 struct spe_function
*f
)
1957 struct tgsi_parse_context parse
;
1961 memset(&gen
, 0, sizeof(gen
));
1965 /* For SPE function calls: reg $3 = first param, $4 = second param, etc. */
1966 gen
.inputs_reg
= 3; /* pointer to inputs array */
1967 gen
.outputs_reg
= 4; /* pointer to outputs array */
1968 gen
.constants_reg
= 5; /* pointer to constants array */
1970 spe_init_func(f
, SPU_MAX_FRAGMENT_PROGRAM_INSTS
* SPE_INST_SIZE
);
1971 spe_allocate_register(f
, gen
.inputs_reg
);
1972 spe_allocate_register(f
, gen
.outputs_reg
);
1973 spe_allocate_register(f
, gen
.constants_reg
);
1975 if (cell
->debug_flags
& CELL_DEBUG_ASM
) {
1976 spe_print_code(f
, TRUE
);
1978 printf("Begin %s\n", __FUNCTION__
);
1979 tgsi_dump(tokens
, 0);
1982 tgsi_parse_init(&parse
, tokens
);
1984 emit_prologue(&gen
);
1986 while (!tgsi_parse_end_of_tokens(&parse
) && !gen
.error
) {
1987 tgsi_parse_token(&parse
);
1989 switch (parse
.FullToken
.Token
.Type
) {
1990 case TGSI_TOKEN_TYPE_IMMEDIATE
:
1992 _debug_printf(" # ");
1993 tgsi_dump_immediate(&parse
.FullToken
.FullImmediate
);
1995 if (!emit_immediate(&gen
, &parse
.FullToken
.FullImmediate
))
1999 case TGSI_TOKEN_TYPE_DECLARATION
:
2001 _debug_printf(" # ");
2002 tgsi_dump_declaration(&parse
.FullToken
.FullDeclaration
);
2004 if (!emit_declaration(cell
, &gen
, &parse
.FullToken
.FullDeclaration
))
2008 case TGSI_TOKEN_TYPE_INSTRUCTION
:
2010 _debug_printf(" # ");
2012 tgsi_dump_instruction(&parse
.FullToken
.FullInstruction
, ic
);
2014 if (!emit_instruction(&gen
, &parse
.FullToken
.FullInstruction
))
2024 /* terminate the SPE code */
2025 return emit_END(&gen
);
2028 if (cell
->debug_flags
& CELL_DEBUG_ASM
) {
2029 printf("cell_gen_fragment_program nr instructions: %d\n", f
->num_inst
);
2030 printf("End %s\n", __FUNCTION__
);
2033 tgsi_parse_free( &parse
);