workbench/libs/mesa/src/gallium/auxiliary/tgsi/tgsi_ppc.c
1 /**************************************************************************
2 *
3 * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26 **************************************************************************/
28 /**
29 * TGSI to PowerPC code generation.
32 #include "pipe/p_config.h"
34 #if defined(PIPE_ARCH_PPC)
36 #include "util/u_debug.h"
37 #include "pipe/p_shader_tokens.h"
38 #include "util/u_math.h"
39 #include "util/u_memory.h"
40 #include "util/u_sse.h"
41 #include "tgsi/tgsi_info.h"
42 #include "tgsi/tgsi_parse.h"
43 #include "tgsi/tgsi_util.h"
44 #include "tgsi_dump.h"
45 #include "tgsi_exec.h"
46 #include "tgsi_ppc.h"
47 #include "rtasm/rtasm_ppc.h"
50 /**
51 * Since it's pretty much impossible to form PPC vector immediates, load
52 * them from memory here:
54 PIPE_ALIGN_VAR(16) const float
55 ppc_builtin_constants[] = {
56 1.0f, -128.0f, 128.0, 0.0
60 #define FOR_EACH_CHANNEL( CHAN )\
61 for (CHAN = 0; CHAN < NUM_CHANNELS; CHAN++)
63 #define IS_DST0_CHANNEL_ENABLED( INST, CHAN )\
64 ((INST).Dst[0].Register.WriteMask & (1 << (CHAN)))
66 #define IF_IS_DST0_CHANNEL_ENABLED( INST, CHAN )\
67 if (IS_DST0_CHANNEL_ENABLED( INST, CHAN ))
69 #define FOR_EACH_DST0_ENABLED_CHANNEL( INST, CHAN )\
70 FOR_EACH_CHANNEL( CHAN )\
71 IF_IS_DST0_CHANNEL_ENABLED( INST, CHAN )
73 #define CHAN_X 0
74 #define CHAN_Y 1
75 #define CHAN_Z 2
76 #define CHAN_W 3
79 /**
80 * How many TGSI temps should be implemented with real PPC vector registers
81 * rather than memory.
83 #define MAX_PPC_TEMPS 3
86 /**
87 * Context/state used during code gen.
89 struct gen_context
91 struct ppc_function *f;
92 int inputs_reg; /**< GP register pointing to input params */
93 int outputs_reg; /**< GP register pointing to output params */
94 int temps_reg; /**< GP register pointing to temporary "registers" */
95 int immed_reg; /**< GP register pointing to immediates buffer */
96 int const_reg; /**< GP register pointing to constants buffer */
97 int builtins_reg; /**< GP register pointing to built-in constants */
99 int offset_reg; /**< used to reduce redundant li instructions */
100 int offset_value;
102 int one_vec; /**< vector register with {1.0, 1.0, 1.0, 1.0} */
103 int bit31_vec; /**< vector register with {1<<31, 1<<31, 1<<31, 1<<31} */
106 * Map TGSI temps to PPC vector temps.
107 * We have 32 PPC vector regs; 4 * MAX_PPC_TEMPS of them hold TGSI temps.
108 * XXX currently only do this for TGSI temps [0..MAX_PPC_TEMPS-1].
110 int temps_map[MAX_PPC_TEMPS][4];
113 * Cache of src registers.
114 * This is used to avoid redundant load instructions.
116 struct {
117 struct tgsi_full_src_register src;
118 uint chan;
119 uint vec;
120 } regs[12]; /* 3 src regs, 4 channels */
121 uint num_regs;
126 * Initialize code generation context.
128 static void
129 init_gen_context(struct gen_context *gen, struct ppc_function *func)
131 uint i;
133 memset(gen, 0, sizeof(*gen));
134 gen->f = func;
135 gen->inputs_reg = ppc_reserve_register(func, 3); /* first function param */
136 gen->outputs_reg = ppc_reserve_register(func, 4); /* second function param */
137 gen->temps_reg = ppc_reserve_register(func, 5); /* third function param */
138 gen->immed_reg = ppc_reserve_register(func, 6); /* fourth function param */
139 gen->const_reg = ppc_reserve_register(func, 7); /* fifth function param */
140 gen->builtins_reg = ppc_reserve_register(func, 8); /* sixth function param */
141 gen->one_vec = -1;
142 gen->bit31_vec = -1;
143 gen->offset_reg = -1;
144 gen->offset_value = -9999999;
145 for (i = 0; i < MAX_PPC_TEMPS; i++) {
146 gen->temps_map[i][0] = ppc_allocate_vec_register(gen->f);
147 gen->temps_map[i][1] = ppc_allocate_vec_register(gen->f);
148 gen->temps_map[i][2] = ppc_allocate_vec_register(gen->f);
149 gen->temps_map[i][3] = ppc_allocate_vec_register(gen->f);
155 * Is the given TGSI register stored as a real PPC vector register?
157 static boolean
158 is_ppc_vec_temporary(const struct tgsi_full_src_register *reg)
160 return (reg->Register.File == TGSI_FILE_TEMPORARY &&
161 reg->Register.Index < MAX_PPC_TEMPS);
166 * Is the given TGSI destination register stored as a real PPC vector register?
168 static boolean
169 is_ppc_vec_temporary_dst(const struct tgsi_full_dst_register *reg)
171 return (reg->Register.File == TGSI_FILE_TEMPORARY &&
172 reg->Register.Index < MAX_PPC_TEMPS);
178 * All PPC vector load/store instructions form an effective address
179 * by adding the contents of two registers. For example:
180 * lvx v2,r8,r9 # v2 = memory[r8 + r9]
181 * stvx v2,r8,r9 # memory[r8 + r9] = v2;
182 * So our lvx/stvx instructions are typically preceded by an 'li' instruction
183 * to load r9 (above) with an immediate (an offset).
184 * This code emits that 'li' instruction, but only if the offset value is
185 * different than the previous 'li'.
186 * This optimization seems to save about 10% in the instruction count.
187 * Note that we need to unconditionally emit an 'li' inside basic blocks
188 * (such as inside loops).
190 static int
191 emit_li_offset(struct gen_context *gen, int offset)
193 if (gen->offset_reg <= 0) {
194 /* allocate a GP register for storing load/store offset */
195 gen->offset_reg = ppc_allocate_register(gen->f);
198 /* emit new 'li' if offset is changing */
199 if (gen->offset_value < 0 || gen->offset_value != offset) {
200 gen->offset_value = offset;
201 ppc_li(gen->f, gen->offset_reg, offset);
204 return gen->offset_reg;
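/*
 * Illustrative sketch (register numbers here are hypothetical; the real
 * ones come from ppc_reserve_register()/ppc_allocate_register()):
 * fetching INPUT[0].x twice in a row would emit roughly
 *    li   r9, 0         # first call: offset 0 is new
 *    lvx  v0, r3, r9
 *    lvx  v1, r3, r9    # same offset cached, no second 'li'
 * while a following fetch of INPUT[0].y (offset 16) needs a fresh
 *    li   r9, 16
 *    lvx  v2, r3, r9
 */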
207 #if 0
209 * Forces subsequent emit_li_offset() calls to emit an 'li'.
210 * To be called at the top of basic blocks.
212 static void
213 reset_li_offset(struct gen_context *gen)
215 gen->offset_value = -9999999;
217 #endif
221 * Load the given vector register with {value, value, value, value}.
222 * The value must be in the ppc_builtin_constants[] array.
223 * We wouldn't need this if there were a simple way to load PPC vector
224 * registers with immediate values!
226 static void
227 load_constant_vec(struct gen_context *gen, int dst_vec, float value)
229 uint pos;
230 for (pos = 0; pos < Elements(ppc_builtin_constants); pos++) {
231 if (ppc_builtin_constants[pos] == value) {
232 int offset = pos * 4;
233 int offset_reg = emit_li_offset(gen, offset);
235 /* Load 4-byte word into vector register.
236 * The vector slot depends on the effective address we load from.
237 * We know that our builtins start at a 16-byte boundary so we
238 * know that 'pos % 4' tells us which vector slot will have the
239 * loaded word. The other vector slots will be undefined.
241 ppc_lvewx(gen->f, dst_vec, gen->builtins_reg, offset_reg);
242 /* splat word[pos % 4] across the vector reg */
243 ppc_vspltw(gen->f, dst_vec, dst_vec, pos % 4);
244 return;
247 assert(0 && "Need to add new constant to ppc_builtin_constants array");
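/*
 * Worked example (a sketch, not literal emitted output):
 * load_constant_vec(gen, v, -128.0f) finds -128.0f at index 1 of
 * ppc_builtin_constants[], so offset = 4 and the emitted code is roughly
 *    li     r9, 4        # byte offset of the constant
 *    lvewx  v, r8, r9    # word lands in vector slot 1 (= pos % 4)
 *    vspltw v, v, 1      # replicate slot 1 into all four slots
 * where r8 stands for builtins_reg and r9 for the cached offset register.
 */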
252 * Return index of vector register containing {1.0, 1.0, 1.0, 1.0}.
254 static int
255 gen_one_vec(struct gen_context *gen)
257 if (gen->one_vec < 0) {
258 gen->one_vec = ppc_allocate_vec_register(gen->f);
259 load_constant_vec(gen, gen->one_vec, 1.0f);
261 return gen->one_vec;
265 * Return index of vector register containing {1<<31, 1<<31, 1<<31, 1<<31}.
267 static int
268 gen_get_bit31_vec(struct gen_context *gen)
270 if (gen->bit31_vec < 0) {
271 gen->bit31_vec = ppc_allocate_vec_register(gen->f);
272 ppc_vspltisw(gen->f, gen->bit31_vec, -1);
273 ppc_vslw(gen->f, gen->bit31_vec, gen->bit31_vec, gen->bit31_vec);
275 return gen->bit31_vec;
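/*
 * Why the above yields {1<<31, ...}: vspltisw(-1) fills every word with
 * 0xffffffff, and vslw shifts each word left by the low five bits of the
 * corresponding word of the shift operand -- here 0xffffffff & 0x1f = 31 --
 * so each word becomes 0xffffffff << 31 = 0x80000000, the float sign bit,
 * without needing a constant in memory.
 */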
280 * Register fetch. Return PPC vector register with result.
282 static int
283 emit_fetch(struct gen_context *gen,
284 const struct tgsi_full_src_register *reg,
285 const unsigned chan_index)
287 uint swizzle = tgsi_util_get_full_src_register_swizzle(reg, chan_index);
288 int dst_vec = -1;
290 switch (swizzle) {
291 case TGSI_SWIZZLE_X:
292 case TGSI_SWIZZLE_Y:
293 case TGSI_SWIZZLE_Z:
294 case TGSI_SWIZZLE_W:
295 switch (reg->Register.File) {
296 case TGSI_FILE_INPUT:
298 int offset = (reg->Register.Index * 4 + swizzle) * 16;
299 int offset_reg = emit_li_offset(gen, offset);
300 dst_vec = ppc_allocate_vec_register(gen->f);
301 ppc_lvx(gen->f, dst_vec, gen->inputs_reg, offset_reg);
303 break;
304 case TGSI_FILE_SYSTEM_VALUE:
305 assert(!"unhandled system value in tgsi_ppc.c");
306 break;
307 case TGSI_FILE_TEMPORARY:
308 if (is_ppc_vec_temporary(reg)) {
309 /* use PPC vec register */
310 dst_vec = gen->temps_map[reg->Register.Index][swizzle];
312 else {
313 /* use memory-based temp register "file" */
314 int offset = (reg->Register.Index * 4 + swizzle) * 16;
315 int offset_reg = emit_li_offset(gen, offset);
316 dst_vec = ppc_allocate_vec_register(gen->f);
317 ppc_lvx(gen->f, dst_vec, gen->temps_reg, offset_reg);
319 break;
320 case TGSI_FILE_IMMEDIATE:
322 int offset = (reg->Register.Index * 4 + swizzle) * 4;
323 int offset_reg = emit_li_offset(gen, offset);
324 dst_vec = ppc_allocate_vec_register(gen->f);
325 /* Load 4-byte word into vector register.
326 * The vector slot depends on the effective address we load from.
327 * We know that our immediates start at a 16-byte boundary so we
328 * know that 'swizzle' tells us which vector slot will have the
329 * loaded word. The other vector slots will be undefined.
331 ppc_lvewx(gen->f, dst_vec, gen->immed_reg, offset_reg);
332 /* splat word[swizzle] across the vector reg */
333 ppc_vspltw(gen->f, dst_vec, dst_vec, swizzle);
335 break;
336 case TGSI_FILE_CONSTANT:
338 int offset = (reg->Register.Index * 4 + swizzle) * 4;
339 int offset_reg = emit_li_offset(gen, offset);
340 dst_vec = ppc_allocate_vec_register(gen->f);
341 /* Load 4-byte word into vector register.
342 * The vector slot depends on the effective address we load from.
343 * We know that our constants start at a 16-byte boundary so we
344 * know that 'swizzle' tells us which vector slot will have the
345 * loaded word. The other vector slots will be undefined.
347 ppc_lvewx(gen->f, dst_vec, gen->const_reg, offset_reg);
348 /* splat word[swizzle] across the vector reg */
349 ppc_vspltw(gen->f, dst_vec, dst_vec, swizzle);
351 break;
352 default:
353 assert( 0 );
355 break;
356 default:
357 assert( 0 );
360 assert(dst_vec >= 0);
363 uint sign_op = tgsi_util_get_full_src_register_sign_mode(reg, chan_index);
364 if (sign_op != TGSI_UTIL_SIGN_KEEP) {
365 int bit31_vec = gen_get_bit31_vec(gen);
366 int dst_vec2;
368 if (is_ppc_vec_temporary(reg)) {
369 /* need to use a new temp */
370 dst_vec2 = ppc_allocate_vec_register(gen->f);
372 else {
373 dst_vec2 = dst_vec;
376 switch (sign_op) {
377 case TGSI_UTIL_SIGN_CLEAR:
378 /* vec = vec & ~bit31 */
379 ppc_vandc(gen->f, dst_vec2, dst_vec, bit31_vec);
380 break;
381 case TGSI_UTIL_SIGN_SET:
382 /* vec = vec | bit31 */
383 ppc_vor(gen->f, dst_vec2, dst_vec, bit31_vec);
384 break;
385 case TGSI_UTIL_SIGN_TOGGLE:
386 /* vec = vec ^ bit31 */
387 ppc_vxor(gen->f, dst_vec2, dst_vec, bit31_vec);
388 break;
389 default:
390 assert(0);
392 return dst_vec2;
396 return dst_vec;
402 * Test if two TGSI src registers refer to the same memory location.
403 * We use this to avoid redundant register loads.
405 static boolean
406 equal_src_locs(const struct tgsi_full_src_register *a, uint chan_a,
407 const struct tgsi_full_src_register *b, uint chan_b)
409 int swz_a, swz_b;
410 int sign_a, sign_b;
411 if (a->Register.File != b->Register.File)
412 return FALSE;
413 if (a->Register.Index != b->Register.Index)
414 return FALSE;
415 swz_a = tgsi_util_get_full_src_register_swizzle(a, chan_a);
416 swz_b = tgsi_util_get_full_src_register_swizzle(b, chan_b);
417 if (swz_a != swz_b)
418 return FALSE;
419 sign_a = tgsi_util_get_full_src_register_sign_mode(a, chan_a);
420 sign_b = tgsi_util_get_full_src_register_sign_mode(b, chan_b);
421 if (sign_a != sign_b)
422 return FALSE;
423 return TRUE;
428 * Given a TGSI src register and channel index, return the PPC vector
429 * register containing the value. We use a cache to prevent re-loading
430 * the same register multiple times.
431 * \return index of PPC vector register with the desired src operand
433 static int
434 get_src_vec(struct gen_context *gen,
435 struct tgsi_full_instruction *inst, int src_reg, uint chan)
437 const struct tgsi_full_src_register *src =
438 &inst->Src[src_reg];
439 int vec;
440 uint i;
442 /* check the cache */
443 for (i = 0; i < gen->num_regs; i++) {
444 if (equal_src_locs(&gen->regs[i].src, gen->regs[i].chan, src, chan)) {
445 /* cache hit */
446 assert(gen->regs[i].vec >= 0);
447 return gen->regs[i].vec;
451 /* cache miss: allocate new vec reg and emit fetch/load code */
452 vec = emit_fetch(gen, src, chan);
453 gen->regs[gen->num_regs].src = *src;
454 gen->regs[gen->num_regs].chan = chan;
455 gen->regs[gen->num_regs].vec = vec;
456 gen->num_regs++;
458 assert(gen->num_regs <= Elements(gen->regs));
460 assert(vec >= 0);
462 return vec;
467 * Clear the src operand cache. To be called at the end of each emit function.
469 static void
470 release_src_vecs(struct gen_context *gen)
472 uint i;
473 for (i = 0; i < gen->num_regs; i++) {
474 const struct tgsi_full_src_register src = gen->regs[i].src;
475 if (!is_ppc_vec_temporary(&src)) {
476 ppc_release_vec_register(gen->f, gen->regs[i].vec);
479 gen->num_regs = 0;
484 static int
485 get_dst_vec(struct gen_context *gen,
486 const struct tgsi_full_instruction *inst,
487 unsigned chan_index)
489 const struct tgsi_full_dst_register *reg = &inst->Dst[0];
491 if (is_ppc_vec_temporary_dst(reg)) {
492 int vec = gen->temps_map[reg->Register.Index][chan_index];
493 return vec;
495 else {
496 return ppc_allocate_vec_register(gen->f);
502 * Register store. Store 'src_vec' at location indicated by 'reg'.
503 * \param free_vec Should the src_vec be released when done?
505 static void
506 emit_store(struct gen_context *gen,
507 int src_vec,
508 const struct tgsi_full_instruction *inst,
509 unsigned chan_index,
510 boolean free_vec)
512 const struct tgsi_full_dst_register *reg = &inst->Dst[0];
514 switch (reg->Register.File) {
515 case TGSI_FILE_OUTPUT:
517 int offset = (reg->Register.Index * 4 + chan_index) * 16;
518 int offset_reg = emit_li_offset(gen, offset);
519 ppc_stvx(gen->f, src_vec, gen->outputs_reg, offset_reg);
521 break;
522 case TGSI_FILE_TEMPORARY:
523 if (is_ppc_vec_temporary_dst(reg)) {
524 if (!free_vec) {
525 int dst_vec = gen->temps_map[reg->Register.Index][chan_index];
526 if (dst_vec != src_vec)
527 ppc_vmove(gen->f, dst_vec, src_vec);
529 free_vec = FALSE;
531 else {
532 int offset = (reg->Register.Index * 4 + chan_index) * 16;
533 int offset_reg = emit_li_offset(gen, offset);
534 ppc_stvx(gen->f, src_vec, gen->temps_reg, offset_reg);
536 break;
537 #if 0
538 case TGSI_FILE_ADDRESS:
539 emit_addrs(
540 func,
541 xmm,
542 reg->Register.Index,
543 chan_index );
544 break;
545 #endif
546 default:
547 assert( 0 );
550 #if 0
551 switch( inst->Instruction.Saturate ) {
552 case TGSI_SAT_NONE:
553 break;
555 case TGSI_SAT_ZERO_ONE:
556 /* assert( 0 ); */
557 break;
559 case TGSI_SAT_MINUS_PLUS_ONE:
560 assert( 0 );
561 break;
563 #endif
565 if (free_vec)
566 ppc_release_vec_register(gen->f, src_vec);
570 static void
571 emit_scalar_unaryop(struct gen_context *gen, struct tgsi_full_instruction *inst)
573 int v0, v1;
574 uint chan_index;
576 v0 = get_src_vec(gen, inst, 0, CHAN_X);
577 v1 = ppc_allocate_vec_register(gen->f);
579 switch (inst->Instruction.Opcode) {
580 case TGSI_OPCODE_RSQ:
581 /* v1 = 1.0 / sqrt(v0) */
582 ppc_vrsqrtefp(gen->f, v1, v0);
583 break;
584 case TGSI_OPCODE_RCP:
585 /* v1 = 1.0 / v0 */
586 ppc_vrefp(gen->f, v1, v0);
587 break;
588 default:
589 assert(0);
592 FOR_EACH_DST0_ENABLED_CHANNEL( *inst, chan_index ) {
593 emit_store(gen, v1, inst, chan_index, FALSE);
596 release_src_vecs(gen);
597 ppc_release_vec_register(gen->f, v1);
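/*
 * Note: vrefp and vrsqrtefp are hardware estimate instructions with
 * limited precision (on the order of 12 bits), so RCP/RSQ results are
 * approximate; a Newton-Raphson refinement step would be needed if full
 * single-precision accuracy were required.
 */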
601 static void
602 emit_unaryop(struct gen_context *gen, struct tgsi_full_instruction *inst)
604 uint chan_index;
606 FOR_EACH_DST0_ENABLED_CHANNEL(*inst, chan_index) {
607 int v0 = get_src_vec(gen, inst, 0, chan_index); /* v0 = srcreg[0] */
608 int v1 = get_dst_vec(gen, inst, chan_index);
609 switch (inst->Instruction.Opcode) {
610 case TGSI_OPCODE_ABS:
611 /* turn off the most significant bit of each vector float word */
613 int bit31_vec = gen_get_bit31_vec(gen);
614 ppc_vandc(gen->f, v1, v0, bit31_vec); /* v1 = v0 & ~bit31 */
616 break;
617 case TGSI_OPCODE_FLR:
618 ppc_vrfim(gen->f, v1, v0); /* v1 = floor(v0) */
619 break;
620 case TGSI_OPCODE_FRC:
621 ppc_vrfim(gen->f, v1, v0); /* tmp = floor(v0) */
622 ppc_vsubfp(gen->f, v1, v0, v1); /* v1 = v0 - v1 */
623 break;
624 case TGSI_OPCODE_EX2:
625 ppc_vexptefp(gen->f, v1, v0); /* v1 = 2^v0 */
626 break;
627 case TGSI_OPCODE_LG2:
628 /* XXX this may be broken! */
629 ppc_vlogefp(gen->f, v1, v0); /* v1 = log2(v0) */
630 break;
631 case TGSI_OPCODE_MOV:
632 if (v0 != v1)
633 ppc_vmove(gen->f, v1, v0);
634 break;
635 default:
636 assert(0);
638 emit_store(gen, v1, inst, chan_index, TRUE); /* store v1 */
641 release_src_vecs(gen);
645 static void
646 emit_binop(struct gen_context *gen, struct tgsi_full_instruction *inst)
648 int zero_vec = -1;
649 uint chan;
651 if (inst->Instruction.Opcode == TGSI_OPCODE_MUL) {
652 zero_vec = ppc_allocate_vec_register(gen->f);
653 ppc_vzero(gen->f, zero_vec);
656 FOR_EACH_DST0_ENABLED_CHANNEL(*inst, chan) {
657 /* fetch src operands */
658 int v0 = get_src_vec(gen, inst, 0, chan);
659 int v1 = get_src_vec(gen, inst, 1, chan);
660 int v2 = get_dst_vec(gen, inst, chan);
662 /* emit binop */
663 switch (inst->Instruction.Opcode) {
664 case TGSI_OPCODE_ADD:
665 ppc_vaddfp(gen->f, v2, v0, v1);
666 break;
667 case TGSI_OPCODE_SUB:
668 ppc_vsubfp(gen->f, v2, v0, v1);
669 break;
670 case TGSI_OPCODE_MUL:
671 ppc_vmaddfp(gen->f, v2, v0, v1, zero_vec);
672 break;
673 case TGSI_OPCODE_MIN:
674 ppc_vminfp(gen->f, v2, v0, v1);
675 break;
676 case TGSI_OPCODE_MAX:
677 ppc_vmaxfp(gen->f, v2, v0, v1);
678 break;
679 default:
680 assert(0);
683 /* store v2 */
684 emit_store(gen, v2, inst, chan, TRUE);
687 if (inst->Instruction.Opcode == TGSI_OPCODE_MUL)
688 ppc_release_vec_register(gen->f, zero_vec);
690 release_src_vecs(gen);
694 static void
695 emit_triop(struct gen_context *gen, struct tgsi_full_instruction *inst)
697 uint chan;
699 FOR_EACH_DST0_ENABLED_CHANNEL(*inst, chan) {
700 /* fetch src operands */
701 int v0 = get_src_vec(gen, inst, 0, chan);
702 int v1 = get_src_vec(gen, inst, 1, chan);
703 int v2 = get_src_vec(gen, inst, 2, chan);
704 int v3 = get_dst_vec(gen, inst, chan);
706 /* emit ALU */
707 switch (inst->Instruction.Opcode) {
708 case TGSI_OPCODE_MAD:
709 ppc_vmaddfp(gen->f, v3, v0, v1, v2); /* v3 = v0 * v1 + v2 */
710 break;
711 case TGSI_OPCODE_LRP:
712 ppc_vsubfp(gen->f, v3, v1, v2); /* v3 = v1 - v2 */
713 ppc_vmaddfp(gen->f, v3, v0, v3, v2); /* v3 = v0 * v3 + v2 */
714 break;
715 default:
716 assert(0);
719 /* store v3 */
720 emit_store(gen, v3, inst, chan, TRUE);
723 release_src_vecs(gen);
728 * Vector comparisons, resulting in 1.0 or 0.0 values.
730 static void
731 emit_inequality(struct gen_context *gen, struct tgsi_full_instruction *inst)
733 uint chan;
734 int one_vec = gen_one_vec(gen);
736 FOR_EACH_DST0_ENABLED_CHANNEL(*inst, chan) {
737 /* fetch src operands */
738 int v0 = get_src_vec(gen, inst, 0, chan);
739 int v1 = get_src_vec(gen, inst, 1, chan);
740 int v2 = get_dst_vec(gen, inst, chan);
741 boolean complement = FALSE;
743 switch (inst->Instruction.Opcode) {
744 case TGSI_OPCODE_SNE:
745 complement = TRUE;
746 /* fall-through */
747 case TGSI_OPCODE_SEQ:
748 ppc_vcmpeqfpx(gen->f, v2, v0, v1); /* v2 = v0 == v1 ? ~0 : 0 */
749 break;
751 case TGSI_OPCODE_SGE:
752 complement = TRUE;
753 /* fall-through */
754 case TGSI_OPCODE_SLT:
755 ppc_vcmpgtfpx(gen->f, v2, v1, v0); /* v2 = v1 > v0 ? ~0 : 0 */
756 break;
758 case TGSI_OPCODE_SLE:
759 complement = TRUE;
760 /* fall-through */
761 case TGSI_OPCODE_SGT:
762 ppc_vcmpgtfpx(gen->f, v2, v0, v1); /* v2 = v0 > v1 ? ~0 : 0 */
763 break;
764 default:
765 assert(0);
768 /* v2 is now {0,0,0,0} or {~0,~0,~0,~0} */
770 if (complement)
771 ppc_vandc(gen->f, v2, one_vec, v2); /* v2 = one_vec & ~v2 */
772 else
773 ppc_vand(gen->f, v2, one_vec, v2); /* v2 = one_vec & v2 */
775 /* store v2 */
776 emit_store(gen, v2, inst, chan, TRUE);
779 release_src_vecs(gen);
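/*
 * Example of the complement path: for SGE the code computes the
 * (src1 > src0) mask, i.e. SLT, then vandc keeps 1.0 exactly where that
 * mask is clear -- giving 1.0 when src0 >= src1 and 0.0 otherwise.
 */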
783 static void
784 emit_dotprod(struct gen_context *gen, struct tgsi_full_instruction *inst)
786 int v0, v1, v2;
787 uint chan_index;
789 v2 = ppc_allocate_vec_register(gen->f);
791 ppc_vzero(gen->f, v2); /* v2 = {0, 0, 0, 0} */
793 v0 = get_src_vec(gen, inst, 0, CHAN_X); /* v0 = src0.XXXX */
794 v1 = get_src_vec(gen, inst, 1, CHAN_X); /* v1 = src1.XXXX */
795 ppc_vmaddfp(gen->f, v2, v0, v1, v2); /* v2 = v0 * v1 + v2 */
797 v0 = get_src_vec(gen, inst, 0, CHAN_Y); /* v0 = src0.YYYY */
798 v1 = get_src_vec(gen, inst, 1, CHAN_Y); /* v1 = src1.YYYY */
799 ppc_vmaddfp(gen->f, v2, v0, v1, v2); /* v2 = v0 * v1 + v2 */
801 v0 = get_src_vec(gen, inst, 0, CHAN_Z); /* v0 = src0.ZZZZ */
802 v1 = get_src_vec(gen, inst, 1, CHAN_Z); /* v1 = src1.ZZZZ */
803 ppc_vmaddfp(gen->f, v2, v0, v1, v2); /* v2 = v0 * v1 + v2 */
805 if (inst->Instruction.Opcode == TGSI_OPCODE_DP4) {
806 v0 = get_src_vec(gen, inst, 0, CHAN_W); /* v0 = src0.WWWW */
807 v1 = get_src_vec(gen, inst, 1, CHAN_W); /* v1 = src1.WWWW */
808 ppc_vmaddfp(gen->f, v2, v0, v1, v2); /* v2 = v0 * v1 + v2 */
810 else if (inst->Instruction.Opcode == TGSI_OPCODE_DPH) {
811 v1 = get_src_vec(gen, inst, 1, CHAN_W); /* v1 = src1.WWWW */
812 ppc_vaddfp(gen->f, v2, v2, v1); /* v2 = v2 + v1 */
815 FOR_EACH_DST0_ENABLED_CHANNEL(*inst, chan_index) {
816 emit_store(gen, v2, inst, chan_index, FALSE); /* store v2, free v2 later */
819 release_src_vecs(gen);
821 ppc_release_vec_register(gen->f, v2);
825 /** Approximation for vr = pow(va, vb) */
826 static void
827 ppc_vec_pow(struct ppc_function *f, int vr, int va, int vb)
829 /* pow(a,b) ~= exp2(log2(a) * b) */
830 int t_vec = ppc_allocate_vec_register(f);
831 int zero_vec = ppc_allocate_vec_register(f);
833 ppc_vzero(f, zero_vec);
835 ppc_vlogefp(f, t_vec, va); /* t = log2(va) */
836 ppc_vmaddfp(f, t_vec, t_vec, vb, zero_vec); /* t = t * vb + zero */
837 ppc_vexptefp(f, vr, t_vec); /* vr = 2^t */
839 ppc_release_vec_register(f, t_vec);
840 ppc_release_vec_register(f, zero_vec);
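/*
 * Sanity check of the identity used above: pow(2.0, 3.0) becomes
 * exp2(log2(2.0) * 3.0) = exp2(3.0) = 8.0.  Because vlogefp/vexptefp are
 * only estimates the result is approximate, and va <= 0 gives undefined
 * output (log2 of a non-positive number).
 */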
844 static void
845 emit_lit(struct gen_context *gen, struct tgsi_full_instruction *inst)
847 int one_vec = gen_one_vec(gen);
849 /* Compute X */
850 if (IS_DST0_CHANNEL_ENABLED(*inst, CHAN_X)) {
851 emit_store(gen, one_vec, inst, CHAN_X, FALSE);
854 /* Compute Y, Z */
855 if (IS_DST0_CHANNEL_ENABLED(*inst, CHAN_Y) ||
856 IS_DST0_CHANNEL_ENABLED(*inst, CHAN_Z)) {
857 int x_vec;
858 int zero_vec = ppc_allocate_vec_register(gen->f);
860 x_vec = get_src_vec(gen, inst, 0, CHAN_X); /* x_vec = src[0].x */
862 ppc_vzero(gen->f, zero_vec); /* zero = {0,0,0,0} */
863 ppc_vmaxfp(gen->f, x_vec, x_vec, zero_vec); /* x_vec = max(x_vec, 0) */
865 if (IS_DST0_CHANNEL_ENABLED(*inst, CHAN_Y)) {
866 emit_store(gen, x_vec, inst, CHAN_Y, FALSE);
869 if (IS_DST0_CHANNEL_ENABLED(*inst, CHAN_Z)) {
870 int y_vec, w_vec;
871 int z_vec = ppc_allocate_vec_register(gen->f);
872 int pow_vec = ppc_allocate_vec_register(gen->f);
873 int pos_vec = ppc_allocate_vec_register(gen->f);
874 int p128_vec = ppc_allocate_vec_register(gen->f);
875 int n128_vec = ppc_allocate_vec_register(gen->f);
877 y_vec = get_src_vec(gen, inst, 0, CHAN_Y); /* y_vec = src[0].y */
878 ppc_vmaxfp(gen->f, y_vec, y_vec, zero_vec); /* y_vec = max(y_vec, 0) */
880 w_vec = get_src_vec(gen, inst, 0, CHAN_W); /* w_vec = src[0].w */
882 /* clamp W to [-128, 128] */
883 load_constant_vec(gen, p128_vec, 128.0f);
884 load_constant_vec(gen, n128_vec, -128.0f);
885 ppc_vmaxfp(gen->f, w_vec, w_vec, n128_vec); /* w = max(w, -128) */
886 ppc_vminfp(gen->f, w_vec, w_vec, p128_vec); /* w = min(w, 128) */
888 /* if temp.x > 0
889 * z = pow(tmp.y, tmp.w)
890 * else
891 * z = 0.0
893 ppc_vec_pow(gen->f, pow_vec, y_vec, w_vec); /* pow = pow(y, w) */
894 ppc_vcmpgtfpx(gen->f, pos_vec, x_vec, zero_vec); /* pos = x > 0 */
895 ppc_vand(gen->f, z_vec, pow_vec, pos_vec); /* z = pow & pos */
897 emit_store(gen, z_vec, inst, CHAN_Z, FALSE);
899 ppc_release_vec_register(gen->f, z_vec);
900 ppc_release_vec_register(gen->f, pow_vec);
901 ppc_release_vec_register(gen->f, pos_vec);
902 ppc_release_vec_register(gen->f, p128_vec);
903 ppc_release_vec_register(gen->f, n128_vec);
906 ppc_release_vec_register(gen->f, zero_vec);
909 /* Compute W */
910 if (IS_DST0_CHANNEL_ENABLED(*inst, CHAN_W)) {
911 emit_store(gen, one_vec, inst, CHAN_W, FALSE);
914 release_src_vecs(gen);
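/*
 * For reference, TGSI LIT computes:
 *    dst.x = 1.0
 *    dst.y = max(src.x, 0)
 *    dst.z = (src.x > 0) ? pow(max(src.y, 0), clamp(src.w, -128, 128)) : 0
 *    dst.w = 1.0
 * which is what the code above implements with vmaxfp/vminfp/vcmpgtfpx
 * plus the ppc_vec_pow() approximation.
 */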
918 static void
919 emit_exp(struct gen_context *gen, struct tgsi_full_instruction *inst)
921 const int one_vec = gen_one_vec(gen);
922 int src_vec;
924 /* get src arg */
925 src_vec = get_src_vec(gen, inst, 0, CHAN_X);
927 /* Compute X = 2^floor(src) */
928 if (IS_DST0_CHANNEL_ENABLED(*inst, CHAN_X)) {
929 int dst_vec = get_dst_vec(gen, inst, CHAN_X);
930 int tmp_vec = ppc_allocate_vec_register(gen->f);
931 ppc_vrfim(gen->f, tmp_vec, src_vec); /* tmp = floor(src); */
932 ppc_vexptefp(gen->f, dst_vec, tmp_vec); /* dst = 2 ^ tmp */
933 emit_store(gen, dst_vec, inst, CHAN_X, TRUE);
934 ppc_release_vec_register(gen->f, tmp_vec);
937 /* Compute Y = src - floor(src) */
938 if (IS_DST0_CHANNEL_ENABLED(*inst, CHAN_Y)) {
939 int dst_vec = get_dst_vec(gen, inst, CHAN_Y);
940 int tmp_vec = ppc_allocate_vec_register(gen->f);
941 ppc_vrfim(gen->f, tmp_vec, src_vec); /* tmp = floor(src); */
942 ppc_vsubfp(gen->f, dst_vec, src_vec, tmp_vec); /* dst = src - tmp */
943 emit_store(gen, dst_vec, inst, CHAN_Y, TRUE);
944 ppc_release_vec_register(gen->f, tmp_vec);
947 /* Compute Z = RoughApprox2ToX(src) */
948 if (IS_DST0_CHANNEL_ENABLED(*inst, CHAN_Z)) {
949 int dst_vec = get_dst_vec(gen, inst, CHAN_Z);
950 ppc_vexptefp(gen->f, dst_vec, src_vec); /* dst = 2 ^ src */
951 emit_store(gen, dst_vec, inst, CHAN_Z, TRUE);
954 /* Compute W = 1.0 */
955 if (IS_DST0_CHANNEL_ENABLED(*inst, CHAN_W)) {
956 emit_store(gen, one_vec, inst, CHAN_W, FALSE);
959 release_src_vecs(gen);
963 static void
964 emit_log(struct gen_context *gen, struct tgsi_full_instruction *inst)
966 const int bit31_vec = gen_get_bit31_vec(gen);
967 const int one_vec = gen_one_vec(gen);
968 int src_vec, abs_vec;
970 /* get src arg */
971 src_vec = get_src_vec(gen, inst, 0, CHAN_X);
973 /* compute abs(src) */
974 abs_vec = ppc_allocate_vec_register(gen->f);
975 ppc_vandc(gen->f, abs_vec, src_vec, bit31_vec); /* abs = src & ~bit31 */
977 if (IS_DST0_CHANNEL_ENABLED(*inst, CHAN_X) &&
978 IS_DST0_CHANNEL_ENABLED(*inst, CHAN_Y)) {
980 /* compute tmp = floor(log2(abs)) */
981 int tmp_vec = ppc_allocate_vec_register(gen->f);
982 ppc_vlogefp(gen->f, tmp_vec, abs_vec); /* tmp = log2(abs) */
983 ppc_vrfim(gen->f, tmp_vec, tmp_vec); /* tmp = floor(tmp); */
985 /* Compute X = tmp */
986 if (IS_DST0_CHANNEL_ENABLED(*inst, CHAN_X)) {
987 emit_store(gen, tmp_vec, inst, CHAN_X, FALSE);
990 /* Compute Y = abs / 2^tmp */
991 if (IS_DST0_CHANNEL_ENABLED(*inst, CHAN_Y)) {
992 const int zero_vec = ppc_allocate_vec_register(gen->f);
993 ppc_vzero(gen->f, zero_vec);
994 ppc_vexptefp(gen->f, tmp_vec, tmp_vec); /* tmp = 2 ^ tmp */
995 ppc_vrefp(gen->f, tmp_vec, tmp_vec); /* tmp = 1 / tmp */
996 /* tmp = abs * tmp + zero */
997 ppc_vmaddfp(gen->f, tmp_vec, abs_vec, tmp_vec, zero_vec);
998 emit_store(gen, tmp_vec, inst, CHAN_Y, FALSE);
999 ppc_release_vec_register(gen->f, zero_vec);
1002 ppc_release_vec_register(gen->f, tmp_vec);
1005 /* Compute Z = RoughApproxLog2(abs) */
1006 if (IS_DST0_CHANNEL_ENABLED(*inst, CHAN_Z)) {
1007 int dst_vec = get_dst_vec(gen, inst, CHAN_Z);
1008 ppc_vlogefp(gen->f, dst_vec, abs_vec); /* dst = log2(abs) */
1009 emit_store(gen, dst_vec, inst, CHAN_Z, TRUE);
1012 /* Compute W = 1.0 */
1013 if (IS_DST0_CHANNEL_ENABLED(*inst, CHAN_W)) {
1014 emit_store(gen, one_vec, inst, CHAN_W, FALSE);
1017 ppc_release_vec_register(gen->f, abs_vec);
1018 release_src_vecs(gen);
1022 static void
1023 emit_pow(struct gen_context *gen, struct tgsi_full_instruction *inst)
1025 int s0_vec = get_src_vec(gen, inst, 0, CHAN_X);
1026 int s1_vec = get_src_vec(gen, inst, 1, CHAN_X);
1027 int pow_vec = ppc_allocate_vec_register(gen->f);
1028 int chan;
1030 ppc_vec_pow(gen->f, pow_vec, s0_vec, s1_vec);
1032 FOR_EACH_DST0_ENABLED_CHANNEL(*inst, chan) {
1033 emit_store(gen, pow_vec, inst, chan, FALSE);
1036 ppc_release_vec_register(gen->f, pow_vec);
1038 release_src_vecs(gen);
1042 static void
1043 emit_xpd(struct gen_context *gen, struct tgsi_full_instruction *inst)
1045 int x0_vec = 0, y0_vec = 0, z0_vec = 0;
1046 int x1_vec = 0, y1_vec = 0, z1_vec = 0;
1047 int zero_vec, tmp_vec;
1048 int tmp2_vec;
1050 zero_vec = ppc_allocate_vec_register(gen->f);
1051 ppc_vzero(gen->f, zero_vec);
1053 tmp_vec = ppc_allocate_vec_register(gen->f);
1054 tmp2_vec = ppc_allocate_vec_register(gen->f);
1055 (void)tmp2_vec; // Unused
1057 if (IS_DST0_CHANNEL_ENABLED(*inst, CHAN_Y) ||
1058 IS_DST0_CHANNEL_ENABLED(*inst, CHAN_Z)) {
1059 x0_vec = get_src_vec(gen, inst, 0, CHAN_X);
1060 x1_vec = get_src_vec(gen, inst, 1, CHAN_X);
1062 if (IS_DST0_CHANNEL_ENABLED(*inst, CHAN_X) ||
1063 IS_DST0_CHANNEL_ENABLED(*inst, CHAN_Z)) {
1064 y0_vec = get_src_vec(gen, inst, 0, CHAN_Y);
1065 y1_vec = get_src_vec(gen, inst, 1, CHAN_Y);
1067 if (IS_DST0_CHANNEL_ENABLED(*inst, CHAN_X) ||
1068 IS_DST0_CHANNEL_ENABLED(*inst, CHAN_Y)) {
1069 z0_vec = get_src_vec(gen, inst, 0, CHAN_Z);
1070 z1_vec = get_src_vec(gen, inst, 1, CHAN_Z);
1073 IF_IS_DST0_CHANNEL_ENABLED(*inst, CHAN_X) {
1074 /* tmp = y0 * z1 */
1075 ppc_vmaddfp(gen->f, tmp_vec, y0_vec, z1_vec, zero_vec);
1076 /* tmp = tmp - z0 * y1*/
1077 ppc_vnmsubfp(gen->f, tmp_vec, tmp_vec, z0_vec, y1_vec);
1078 emit_store(gen, tmp_vec, inst, CHAN_X, FALSE);
1080 IF_IS_DST0_CHANNEL_ENABLED(*inst, CHAN_Y) {
1081 /* tmp = z0 * x1 */
1082 ppc_vmaddfp(gen->f, tmp_vec, z0_vec, x1_vec, zero_vec);
1083 /* tmp = tmp - x0 * z1 */
1084 ppc_vnmsubfp(gen->f, tmp_vec, tmp_vec, x0_vec, z1_vec);
1085 emit_store(gen, tmp_vec, inst, CHAN_Y, FALSE);
1087 IF_IS_DST0_CHANNEL_ENABLED(*inst, CHAN_Z) {
1088 /* tmp = x0 * y1 */
1089 ppc_vmaddfp(gen->f, tmp_vec, x0_vec, y1_vec, zero_vec);
1090 /* tmp = tmp - y0 * x1 */
1091 ppc_vnmsubfp(gen->f, tmp_vec, tmp_vec, y0_vec, x1_vec);
1092 emit_store(gen, tmp_vec, inst, CHAN_Z, FALSE);
1094 /* W is undefined */
1096 ppc_release_vec_register(gen->f, tmp_vec);
1097 ppc_release_vec_register(gen->f, zero_vec);
1098 release_src_vecs(gen);
1101 static int
1102 emit_instruction(struct gen_context *gen,
1103 struct tgsi_full_instruction *inst)
1106 /* we don't handle saturation/clamping yet */
1107 if (inst->Instruction.Saturate != TGSI_SAT_NONE)
1108 return 0;
1110 /* need to use extra temps to fix SOA dependencies : */
1111 if (tgsi_check_soa_dependencies(inst))
1112 return FALSE;
1114 switch (inst->Instruction.Opcode) {
1115 case TGSI_OPCODE_MOV:
1116 case TGSI_OPCODE_ABS:
1117 case TGSI_OPCODE_FLR:
1118 case TGSI_OPCODE_FRC:
1119 case TGSI_OPCODE_EX2:
1120 case TGSI_OPCODE_LG2:
1121 emit_unaryop(gen, inst);
1122 break;
1123 case TGSI_OPCODE_RSQ:
1124 case TGSI_OPCODE_RCP:
1125 emit_scalar_unaryop(gen, inst);
1126 break;
1127 case TGSI_OPCODE_ADD:
1128 case TGSI_OPCODE_SUB:
1129 case TGSI_OPCODE_MUL:
1130 case TGSI_OPCODE_MIN:
1131 case TGSI_OPCODE_MAX:
1132 emit_binop(gen, inst);
1133 break;
1134 case TGSI_OPCODE_SEQ:
1135 case TGSI_OPCODE_SNE:
1136 case TGSI_OPCODE_SLT:
1137 case TGSI_OPCODE_SGT:
1138 case TGSI_OPCODE_SLE:
1139 case TGSI_OPCODE_SGE:
1140 emit_inequality(gen, inst);
1141 break;
1142 case TGSI_OPCODE_MAD:
1143 case TGSI_OPCODE_LRP:
1144 emit_triop(gen, inst);
1145 break;
1146 case TGSI_OPCODE_DP3:
1147 case TGSI_OPCODE_DP4:
1148 case TGSI_OPCODE_DPH:
1149 emit_dotprod(gen, inst);
1150 break;
1151 case TGSI_OPCODE_LIT:
1152 emit_lit(gen, inst);
1153 break;
1154 case TGSI_OPCODE_LOG:
1155 emit_log(gen, inst);
1156 break;
1157 case TGSI_OPCODE_EXP:
1158 emit_exp(gen, inst);
1159 break;
1160 case TGSI_OPCODE_POW:
1161 emit_pow(gen, inst);
1162 break;
1163 case TGSI_OPCODE_XPD:
1164 emit_xpd(gen, inst);
1165 break;
1166 case TGSI_OPCODE_END:
1167 /* normal end */
1168 return 1;
1169 default:
1170 return 0;
1172 return 1;
1176 static void
1177 emit_declaration(
1178 struct ppc_function *func,
1179 struct tgsi_full_declaration *decl )
1181 if( decl->Declaration.File == TGSI_FILE_INPUT ||
1182 decl->Declaration.File == TGSI_FILE_SYSTEM_VALUE ) {
1183 #if 0
1184 unsigned first, last, mask;
1185 unsigned i, j;
1187 first = decl->Range.First;
1188 last = decl->Range.Last;
1189 mask = decl->Declaration.UsageMask;
1191 for( i = first; i <= last; i++ ) {
1192 for( j = 0; j < NUM_CHANNELS; j++ ) {
1193 if( mask & (1 << j) ) {
1194 switch( decl->Declaration.Interpolate ) {
1195 case TGSI_INTERPOLATE_CONSTANT:
1196 emit_coef_a0( func, 0, i, j );
1197 emit_inputs( func, 0, i, j );
1198 break;
1200 case TGSI_INTERPOLATE_LINEAR:
1201 emit_tempf( func, 0, 0, TGSI_SWIZZLE_X );
1202 emit_coef_dadx( func, 1, i, j );
1203 emit_tempf( func, 2, 0, TGSI_SWIZZLE_Y );
1204 emit_coef_dady( func, 3, i, j );
1205 emit_mul( func, 0, 1 ); /* x * dadx */
1206 emit_coef_a0( func, 4, i, j );
1207 emit_mul( func, 2, 3 ); /* y * dady */
1208 emit_add( func, 0, 4 ); /* x * dadx + a0 */
1209 emit_add( func, 0, 2 ); /* x * dadx + y * dady + a0 */
1210 emit_inputs( func, 0, i, j );
1211 break;
1213 case TGSI_INTERPOLATE_PERSPECTIVE:
1214 emit_tempf( func, 0, 0, TGSI_SWIZZLE_X );
1215 emit_coef_dadx( func, 1, i, j );
1216 emit_tempf( func, 2, 0, TGSI_SWIZZLE_Y );
1217 emit_coef_dady( func, 3, i, j );
1218 emit_mul( func, 0, 1 ); /* x * dadx */
1219 emit_tempf( func, 4, 0, TGSI_SWIZZLE_W );
1220 emit_coef_a0( func, 5, i, j );
1221 emit_rcp( func, 4, 4 ); /* 1.0 / w */
1222 emit_mul( func, 2, 3 ); /* y * dady */
1223 emit_add( func, 0, 5 ); /* x * dadx + a0 */
1224 emit_add( func, 0, 2 ); /* x * dadx + y * dady + a0 */
1225 emit_mul( func, 0, 4 ); /* (x * dadx + y * dady + a0) / w */
1226 emit_inputs( func, 0, i, j );
1227 break;
1229 default:
1230 assert( 0 );
1231 break;
1236 #endif
1242 static void
1243 emit_prologue(struct ppc_function *func)
1245 /* XXX set up stack frame */
1249 static void
1250 emit_epilogue(struct ppc_function *func)
1252 ppc_comment(func, -4, "Epilogue:");
1253 ppc_return(func);
1254 /* XXX restore prev stack frame */
1255 #if 0
1256 debug_printf("PPC: Emitted %u instructions\n", func->num_inst);
1257 #endif
1263 * Translate a TGSI vertex/fragment shader to PPC code.
1265 * \param tokens the TGSI input shader
1266 * \param func the output PPC code/function
1267 * \param immediates buffer to place immediates, later passed to PPC func
1268 * \return TRUE for success, FALSE if translation failed
1270 boolean
1271 tgsi_emit_ppc(const struct tgsi_token *tokens,
1272 struct ppc_function *func,
1273 float (*immediates)[4],
1274 boolean do_swizzles )
1276 static int use_ppc_asm = -1;
1277 struct tgsi_parse_context parse;
1278 /*boolean instruction_phase = FALSE;*/
1279 unsigned ok = 1;
1280 uint num_immediates = 0;
1281 struct gen_context gen;
1282 uint ic = 0;
1284 if (use_ppc_asm < 0) {
1285 /* If GALLIUM_NOPPC is set, don't use PPC codegen */
1286 use_ppc_asm = !debug_get_bool_option("GALLIUM_NOPPC", FALSE);
1288 if (!use_ppc_asm)
1289 return FALSE;
1291 if (0) {
1292 debug_printf("\n********* TGSI->PPC ********\n");
1293 tgsi_dump(tokens, 0);
1296 util_init_math();
1298 init_gen_context(&gen, func);
1300 emit_prologue(func);
1302 tgsi_parse_init( &parse, tokens );
1304 while (!tgsi_parse_end_of_tokens(&parse) && ok) {
1305 tgsi_parse_token(&parse);
1307 switch (parse.FullToken.Token.Type) {
1308 case TGSI_TOKEN_TYPE_DECLARATION:
1309 if (parse.FullHeader.Processor.Processor == TGSI_PROCESSOR_FRAGMENT) {
1310 emit_declaration(func, &parse.FullToken.FullDeclaration );
1312 break;
1314 case TGSI_TOKEN_TYPE_INSTRUCTION:
1315 if (func->print) {
1316 _debug_printf("# ");
1317 ic++;
1318 tgsi_dump_instruction(&parse.FullToken.FullInstruction, ic);
1321 ok = emit_instruction(&gen, &parse.FullToken.FullInstruction);
1323 if (!ok) {
1324 uint opcode = parse.FullToken.FullInstruction.Instruction.Opcode;
1325 debug_printf("failed to translate tgsi opcode %d (%s) to PPC (%s)\n",
1326 opcode,
1327 tgsi_get_opcode_name(opcode),
1328 parse.FullHeader.Processor.Processor == TGSI_PROCESSOR_VERTEX ?
1329 "vertex shader" : "fragment shader");
1331 break;
1333 case TGSI_TOKEN_TYPE_IMMEDIATE:
1334 /* splat each immediate component into a float[4] vector for SoA */
1336 const uint size = parse.FullToken.FullImmediate.Immediate.NrTokens - 1;
1337 uint i;
1338 assert(size <= 4);
1339 assert(num_immediates < TGSI_EXEC_NUM_IMMEDIATES);
1340 for (i = 0; i < size; i++) {
1341 immediates[num_immediates][i] =
1342 parse.FullToken.FullImmediate.u[i].Float;
1344 num_immediates++;
1346 break;
1348 case TGSI_TOKEN_TYPE_PROPERTY:
1349 break;
1351 default:
1352 ok = 0;
1353 assert( 0 );
1357 emit_epilogue(func);
1359 tgsi_parse_free( &parse );
1361 if (ppc_num_instructions(func) == 0) {
1362 /* ran out of memory for instructions */
1363 ok = FALSE;
1366 if (!ok)
1367 debug_printf("TGSI->PPC translation failed\n");
1369 return ok;
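/*
 * Typical usage, as a rough sketch (helper names from rtasm_ppc.h are
 * assumed here -- check the actual header):
 *
 *    struct ppc_function func;
 *    float immediates[TGSI_EXEC_NUM_IMMEDIATES][4];
 *
 *    ppc_init_func(&func);
 *    if (tgsi_emit_ppc(tokens, &func, immediates, TRUE)) {
 *       // The generated code takes the six buffer pointers (inputs,
 *       // outputs, temps, immediates, constants, ppc_builtin_constants)
 *       // as its first six arguments, matching the GP registers r3..r8
 *       // reserved in init_gen_context().
 *    }
 *    ppc_release_func(&func);
 */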
1372 #else
1374 void ppc_dummy_func(void);
1376 void ppc_dummy_func(void)
1380 #endif /* PIPE_ARCH_PPC */