workbench/libs/mesa/src/gallium/drivers/nv50/nv50_pc_optimize.c
/*
 * Copyright 2010 Christoph Bumiller
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
 * OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "nv50_pc.h"
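
/* Visit both CFG successors of block @b exactly once per pass: pass_seq
 * serves as the visited stamp and is bumped by the pass driver before
 * each traversal.
 */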
#define DESCEND_ARBITRARY(j, f)                                 \
do {                                                            \
   b->pass_seq = ctx->pc->pass_seq;                             \
                                                                \
   for (j = 0; j < 2; ++j)                                      \
      if (b->out[j] && b->out[j]->pass_seq < ctx->pc->pass_seq) \
         f(ctx, b->out[j]);                                     \
} while (0)

extern unsigned nv50_inst_min_size(struct nv_instruction *);

struct nv_pc_pass {
   struct nv_pc *pc;
};

static INLINE boolean
values_equal(struct nv_value *a, struct nv_value *b)
{
   /* XXX: sizes */
   return (a->reg.file == b->reg.file && a->join->reg.id == b->join->reg.id);
}

static INLINE boolean
inst_commutation_check(struct nv_instruction *a,
                       struct nv_instruction *b)
{
   int si, di;

   for (di = 0; di < 4; ++di) {
      if (!a->def[di])
         break;
      for (si = 0; si < 5; ++si) {
         if (!b->src[si])
            continue;
         if (values_equal(a->def[di], b->src[si]->value))
            return FALSE;
      }
   }

   if (b->flags_src && b->flags_src->value == a->flags_def)
      return FALSE;

   return TRUE;
}

/* Check whether we can swap the order of the instructions,
 * where a & b may be either the earlier or the later one.
 */
static boolean
inst_commutation_legal(struct nv_instruction *a,
                       struct nv_instruction *b)
{
   return inst_commutation_check(a, b) && inst_commutation_check(b, a);
}

static INLINE boolean
inst_cullable(struct nv_instruction *nvi)
{
   if (nvi->opcode == NV_OP_STA)
      return FALSE;
   return (!(nvi->is_terminator || nvi->is_join ||
             nvi->target ||
             nvi->fixed ||
             nv_nvi_refcount(nvi)));
}
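
/* An instruction is a no-op for emission purposes if it is an EXPORT or
 * UNDEF marker, or a MOV/SELECT whose source and destination ended up
 * coalesced into the same register (e.g. "mov $r4, $r4").
 */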
static INLINE boolean
nvi_isnop(struct nv_instruction *nvi)
{
   if (nvi->opcode == NV_OP_EXPORT || nvi->opcode == NV_OP_UNDEF)
      return TRUE;

   /* NOTE: 'fixed' now only means that it shouldn't be optimized away,
    * but we can still remove it if it is a no-op move.
    */
   if (/* nvi->fixed || */
       /* nvi->flags_src || */ /* cond. MOV to same register is still NOP */
       nvi->flags_def ||
       nvi->is_terminator ||
       nvi->is_join)
      return FALSE;

   if (nvi->def[0] && nvi->def[0]->join->reg.id < 0)
      return TRUE;

   if (nvi->opcode != NV_OP_MOV && nvi->opcode != NV_OP_SELECT)
      return FALSE;

   if (nvi->def[0]->reg.file != nvi->src[0]->value->reg.file)
      return FALSE;

   if (nvi->src[0]->value->join->reg.id < 0) {
      NV50_DBGMSG(PROG_IR, "nvi_isnop: orphaned value detected\n");
      return TRUE;
   }

   if (nvi->opcode == NV_OP_SELECT)
      if (!values_equal(nvi->def[0], nvi->src[1]->value))
         return FALSE;

   return values_equal(nvi->def[0], nvi->src[0]->value);
}

struct nv_pass {
   struct nv_pc *pc;
   int n;
   void *priv;
};

static int
nv_pass_flatten(struct nv_pass *ctx, struct nv_basic_block *b);
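
/* Prepare a block for emission: compute its position from the previously
 * emitted blocks (deleting branches that would just fall through, i.e.
 * BRA $PC+8), drop no-op instructions, and settle on encodings.  nv50
 * instructions come in 4-byte (short) and 8-byte (long) forms; n32 counts
 * a run of short encodings, and an odd-length run is fixed up either by
 * swapping in a commutable 4-byte neighbour or by lengthening an
 * instruction.  The block's exit instruction is always made long.
 * bin_size is counted in 32-bit units here and scaled to bytes at the
 * end (b->bin_size *= 4).
 */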
static void
nv_pc_pass_pre_emission(void *priv, struct nv_basic_block *b)
{
   struct nv_pc *pc = (struct nv_pc *)priv;
   struct nv_basic_block *in;
   struct nv_instruction *nvi, *next;
   int j;
   uint size, n32 = 0;

   /* find first non-empty block emitted before b */
   for (j = pc->num_blocks - 1; j >= 0 && !pc->bb_list[j]->bin_size; --j);
   for (; j >= 0; --j) {
      in = pc->bb_list[j];

      /* check for no-op branches (BRA $PC+8) */
      if (in->exit && in->exit->opcode == NV_OP_BRA && in->exit->target == b) {
         in->bin_size -= 8;
         pc->bin_size -= 8;

         for (++j; j < pc->num_blocks; ++j)
            pc->bb_list[j]->bin_pos -= 8;

         nv_nvi_delete(in->exit);
      }
      b->bin_pos = in->bin_pos + in->bin_size;

      if (in->bin_size) /* no more no-op branches to b */
         break;
   }
   pc->bb_list[pc->num_blocks++] = b;

   /* visit node */

   for (nvi = b->entry; nvi; nvi = next) {
      next = nvi->next;
      if (nvi_isnop(nvi))
         nv_nvi_delete(nvi);
   }

   for (nvi = b->entry; nvi; nvi = next) {
      next = nvi->next;

      size = nv50_inst_min_size(nvi);
      if (nvi->next && size < 8)
         ++n32;
      else
      if ((n32 & 1) && nvi->next &&
          nv50_inst_min_size(nvi->next) == 4 &&
          inst_commutation_legal(nvi, nvi->next)) {
         ++n32;
         nv_nvi_permute(nvi, nvi->next);
         next = nvi;
      } else {
         nvi->is_long = 1;

         b->bin_size += n32 & 1;
         if (n32 & 1)
            nvi->prev->is_long = 1;
         n32 = 0;
      }
      b->bin_size += 1 + nvi->is_long;
   }

   if (!b->entry) {
      NV50_DBGMSG(PROG_IR, "block %p is now empty\n", b);
   } else
   if (!b->exit->is_long) {
      assert(n32);
      b->exit->is_long = 1;
      b->bin_size += 1;

      /* might have del'd a whole tail of instructions */
      if (!b->exit->prev->is_long && !(n32 & 1)) {
         b->bin_size += 1;
         b->exit->prev->is_long = 1;
      }
   }
   assert(!b->entry || (b->exit && b->exit->is_long));

   pc->bin_size += b->bin_size *= 4;
}

static int
nv_pc_pass2(struct nv_pc *pc, struct nv_basic_block *root)
{
   struct nv_pass pass;

   pass.pc = pc;

   pc->pass_seq++;

   nv_pass_flatten(&pass, root);

   nv_pc_pass_in_order(root, nv_pc_pass_pre_emission, pc);

   return 0;
}

int
nv_pc_exec_pass2(struct nv_pc *pc)
{
   int i, ret;

   NV50_DBGMSG(PROG_IR, "preparing %u blocks for emission\n", pc->num_blocks);

   pc->num_blocks = 0; /* will reorder bb_list */

   for (i = 0; i < pc->num_subroutines + 1; ++i)
      if (pc->root[i] && (ret = nv_pc_pass2(pc, pc->root[i])))
         return ret;
   return 0;
}
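
/* Classify instructions whose result could instead be read directly from
 * its source location: LDA from a constant space c0[]..c15[], LDA from
 * s[]/p[] space, and MOVs of immediates.  Used below for operand
 * ordering and load folding.
 */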
static INLINE boolean
is_cmem_load(struct nv_instruction *nvi)
{
   return (nvi->opcode == NV_OP_LDA &&
           nvi->src[0]->value->reg.file >= NV_FILE_MEM_C(0) &&
           nvi->src[0]->value->reg.file <= NV_FILE_MEM_C(15));
}

static INLINE boolean
is_smem_load(struct nv_instruction *nvi)
{
   return (nvi->opcode == NV_OP_LDA &&
           (nvi->src[0]->value->reg.file == NV_FILE_MEM_S ||
            nvi->src[0]->value->reg.file <= NV_FILE_MEM_P));
}

static INLINE boolean
is_immd_move(struct nv_instruction *nvi)
{
   return (nvi->opcode == NV_OP_MOV &&
           nvi->src[0]->value->reg.file == NV_FILE_IMM);
}
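
/* Canonicalize operand order of commutative ops so that a constant-space
 * load ends up in src[1] and an s[]-space load in src[0].  For SET, the
 * low three bits of set_cond appear to be an LT/EQ/GT outcome mask, so
 * cc_swapped mirrors the condition when the operands trade places:
 * e.g. LE (LT|EQ = 3) becomes GE (GT|EQ = 6), while EQ (2) maps to
 * itself.
 */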
static INLINE void
check_swap_src_0_1(struct nv_instruction *nvi)
{
   static const ubyte cc_swapped[8] = { 0, 4, 2, 6, 1, 5, 3, 7 };

   struct nv_ref *src0 = nvi->src[0], *src1 = nvi->src[1];

   if (!nv_op_commutative(nvi->opcode))
      return;
   assert(src0 && src1);

   if (src1->value->reg.file == NV_FILE_IMM)
      return;

   if (is_cmem_load(src0->value->insn)) {
      if (!is_cmem_load(src1->value->insn)) {
         nvi->src[0] = src1;
         nvi->src[1] = src0;
         /* debug_printf("swapping cmem load to 1\n"); */
      }
   } else
   if (is_smem_load(src1->value->insn)) {
      if (!is_smem_load(src0->value->insn)) {
         nvi->src[0] = src1;
         nvi->src[1] = src0;
         /* debug_printf("swapping smem load to 0\n"); */
      }
   }

   if (nvi->opcode == NV_OP_SET && nvi->src[0] != src0)
      nvi->set_cond = (nvi->set_cond & ~7) | cc_swapped[nvi->set_cond & 7];
}
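
/* Fold an output move into the instruction computing the value, e.g.
 *
 *    add $r0, $r1, $r2          add $o0, $r1, $r2
 *    mov $o0, $r0         -->
 *
 * valid only if $r0 has no other uses and the producer reads neither an
 * immediate nor l[] memory (instructions writing $oX cannot do that).
 */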
static int
nv_pass_fold_stores(struct nv_pass *ctx, struct nv_basic_block *b)
{
   struct nv_instruction *nvi, *sti, *next;
   int j;

   for (sti = b->entry; sti; sti = next) {
      next = sti->next;

      /* only handling MOV to $oX here */
      if (!sti->def[0] || sti->def[0]->reg.file != NV_FILE_OUT)
         continue;
      if (sti->opcode != NV_OP_MOV && sti->opcode != NV_OP_STA)
         continue;

      nvi = sti->src[0]->value->insn;
      if (!nvi || nvi->opcode == NV_OP_PHI || nv_is_vector_op(nvi->opcode))
         continue;
      assert(nvi->def[0] == sti->src[0]->value);

      if (nvi->opcode == NV_OP_SELECT)
         continue;
      if (nvi->def[0]->refc > 1)
         continue;

      /* cannot write to $oX when using immediate */
      for (j = 0; j < 4 && nvi->src[j]; ++j)
         if (nvi->src[j]->value->reg.file == NV_FILE_IMM ||
             nvi->src[j]->value->reg.file == NV_FILE_MEM_L)
            break;
      if (j < 4 && nvi->src[j])
         continue;

      nvi->def[0] = sti->def[0];
      nvi->def[0]->insn = nvi;
      nvi->fixed = sti->fixed;

      nv_nvi_delete(sti);
   }
   DESCEND_ARBITRARY(j, nv_pass_fold_stores);

   return 0;
}
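
/* Fold loads into their users where the target can source memory
 * directly, e.g.
 *
 *    lda $r0, c0[0x10]          mul $r1, c0[0x10], $r2
 *    mul $r1, $r0, $r2    -->
 *
 * Immediate MOVs are propagated the same way when the consumer can
 * encode an immediate operand; the load itself is deleted once its
 * refcount drops to zero.
 */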
static int
nv_pass_fold_loads(struct nv_pass *ctx, struct nv_basic_block *b)
{
   struct nv_instruction *nvi, *ld;
   int j;

   for (nvi = b->entry; nvi; nvi = nvi->next) {
      check_swap_src_0_1(nvi);

      for (j = 0; j < 3; ++j) {
         if (!nvi->src[j])
            break;
         ld = nvi->src[j]->value->insn;
         if (!ld)
            continue;

         if (is_immd_move(ld) && nv50_nvi_can_use_imm(nvi, j)) {
            nv_reference(ctx->pc, &nvi->src[j], ld->src[0]->value);
            continue;
         }

         if (ld->opcode != NV_OP_LDA)
            continue;
         if (!nv50_nvi_can_load(nvi, j, ld->src[0]->value))
            continue;

         if (j == 0 && ld->src[4]) /* can't load shared mem */
            continue;

         /* fold it ! */
         nv_reference(ctx->pc, &nvi->src[j], ld->src[0]->value);
         if (ld->src[4])
            nv_reference(ctx->pc, &nvi->src[4], ld->src[4]->value);

         if (!nv_nvi_refcount(ld))
            nv_nvi_delete(ld);
      }
   }
   DESCEND_ARBITRARY(j, nv_pass_fold_loads);

   return 0;
}
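
/* Turn single-use NEG/ABS instructions into source modifiers on their
 * consumers where supported, e.g.
 *
 *    neg $r1, $r2               add $r0, -$r2, $r3
 *    add $r0, $r1, $r3    -->
 *
 * SUB is first canonicalized to ADD with a negated second source, and a
 * SAT whose operand is a single-use ADD/MAD is merged into the producer
 * as a saturate flag.
 */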
/* NOTE: Assumes loads have not yet been folded. */
static int
nv_pass_lower_mods(struct nv_pass *ctx, struct nv_basic_block *b)
{
   int j;
   struct nv_instruction *nvi, *mi, *next;
   ubyte mod;

   for (nvi = b->entry; nvi; nvi = next) {
      next = nvi->next;
      if (nvi->opcode == NV_OP_SUB) {
         nvi->opcode = NV_OP_ADD;
         nvi->src[1]->mod ^= NV_MOD_NEG;
      }

      for (j = 0; j < 4 && nvi->src[j]; ++j) {
         mi = nvi->src[j]->value->insn;
         if (!mi)
            continue;
         if (mi->def[0]->refc > 1)
            continue;

         if (mi->opcode == NV_OP_NEG) mod = NV_MOD_NEG;
         else
         if (mi->opcode == NV_OP_ABS) mod = NV_MOD_ABS;
         else
            continue;
         assert(!(mod & mi->src[0]->mod & NV_MOD_NEG));

         mod |= mi->src[0]->mod;

         if (mi->flags_def || mi->flags_src)
            continue;

         if ((nvi->opcode == NV_OP_ABS) || (nvi->src[j]->mod & NV_MOD_ABS)) {
            /* abs neg [abs] = abs */
            mod &= ~(NV_MOD_NEG | NV_MOD_ABS);
         } else
         if ((nvi->opcode == NV_OP_NEG) && (mod & NV_MOD_NEG)) {
            /* neg as opcode and modifier on same insn cannot occur */
            /* neg neg abs = abs, neg neg = identity */
            assert(j == 0);
            if (mod & NV_MOD_ABS)
               nvi->opcode = NV_OP_ABS;
            else
            if (nvi->flags_def)
               nvi->opcode = NV_OP_CVT;
            else
               nvi->opcode = NV_OP_MOV;
            mod = 0;
         }

         if ((nv50_supported_src_mods(nvi->opcode, j) & mod) != mod)
            continue;

         nv_reference(ctx->pc, &nvi->src[j], mi->src[0]->value);

         nvi->src[j]->mod ^= mod;
      }

      if (nvi->opcode == NV_OP_SAT) {
         mi = nvi->src[0]->value->insn;

         if (mi->opcode != NV_OP_ADD && mi->opcode != NV_OP_MAD)
            continue;
         if (mi->flags_def || mi->def[0]->refc > 1)
            continue;

         mi->saturate = 1;
         mi->def[0] = nvi->def[0];
         mi->def[0]->insn = mi;
         nv_nvi_delete(nvi);
      }
   }
   DESCEND_ARBITRARY(j, nv_pass_lower_mods);

   return 0;
}

#define SRC_IS_MUL(s) ((s)->insn && (s)->insn->opcode == NV_OP_MUL)
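
/* Evaluate ABS/NEG modifiers on a raw 32-bit immediate: floats by
 * masking or flipping the sign bit, integers via two's complement.
 */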
static void
modifiers_apply(uint32_t *val, ubyte type, ubyte mod)
{
   if (mod & NV_MOD_ABS) {
      if (type == NV_TYPE_F32)
         *val &= 0x7fffffff;
      else
      if ((*val) & (1 << 31))
         *val = ~(*val) + 1;
   }
   if (mod & NV_MOD_NEG) {
      if (type == NV_TYPE_F32)
         *val ^= 0x80000000;
      else
         *val = ~(*val) + 1;
   }
}

static INLINE uint
modifiers_opcode(ubyte mod)
{
   switch (mod) {
   case NV_MOD_NEG: return NV_OP_NEG;
   case NV_MOD_ABS: return NV_OP_ABS;
   case 0:
      return NV_OP_MOV;
   default:
      return NV_OP_NOP;
   }
}
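
/* Both sources are immediates: compute MUL/ADD/SUB (or the multiply part
 * of a MAD with a GPR addend) at compile time and rewrite the result as
 * a MOV of the constant; a MAD then degenerates to an ADD of its third
 * source, or to a plain MOV when the folded product is zero.
 */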
static void
constant_expression(struct nv_pc *pc, struct nv_instruction *nvi,
                    struct nv_value *src0, struct nv_value *src1)
{
   struct nv_value *val;
   union {
      float f32;
      uint32_t u32;
      int32_t s32;
   } u0, u1, u;
   ubyte type;

   if (!nvi->def[0])
      return;
   type = nvi->def[0]->reg.type;

   u.u32 = 0;
   u0.u32 = src0->reg.imm.u32;
   u1.u32 = src1->reg.imm.u32;

   modifiers_apply(&u0.u32, type, nvi->src[0]->mod);
   modifiers_apply(&u1.u32, type, nvi->src[1]->mod);

   switch (nvi->opcode) {
   case NV_OP_MAD:
      if (nvi->src[2]->value->reg.file != NV_FILE_GPR)
         return;
      /* fall through */
   case NV_OP_MUL:
      switch (type) {
      case NV_TYPE_F32: u.f32 = u0.f32 * u1.f32; break;
      case NV_TYPE_U32: u.u32 = u0.u32 * u1.u32; break;
      case NV_TYPE_S32: u.s32 = u0.s32 * u1.s32; break;
      default:
         assert(0);
         break;
      }
      break;
   case NV_OP_ADD:
      switch (type) {
      case NV_TYPE_F32: u.f32 = u0.f32 + u1.f32; break;
      case NV_TYPE_U32: u.u32 = u0.u32 + u1.u32; break;
      case NV_TYPE_S32: u.s32 = u0.s32 + u1.s32; break;
      default:
         assert(0);
         break;
      }
      break;
   case NV_OP_SUB:
      switch (type) {
      case NV_TYPE_F32: u.f32 = u0.f32 - u1.f32; break;
      case NV_TYPE_U32: u.u32 = u0.u32 - u1.u32; break;
      case NV_TYPE_S32: u.s32 = u0.s32 - u1.s32; break;
      default:
         assert(0);
         break;
      }
      break;
   default:
      return;
   }

   nvi->opcode = NV_OP_MOV;

   val = new_value(pc, NV_FILE_IMM, type);

   val->reg.imm.u32 = u.u32;

   nv_reference(pc, &nvi->src[1], NULL);
   nv_reference(pc, &nvi->src[0], val);

   if (nvi->src[2]) { /* from MAD */
      nvi->src[1] = nvi->src[0];
      nvi->src[0] = nvi->src[2];
      nvi->src[2] = NULL;
      nvi->opcode = NV_OP_ADD;

      if (val->reg.imm.u32 == 0) {
         nvi->src[1] = NULL;
         nvi->opcode = NV_OP_MOV;
      }
   }
}
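
/* Exactly one source is an immediate: simplify algebraically instead of
 * computing, using x * 1 = x, x * 2 = x + x, x * -1 = -x, x * 0 = 0 and
 * x + 0 = x, and fold RCP/RSQ of an immediate to a MOV of the result.
 * If the instruction collapses to a MOV but its flags output is live, a
 * CVT is appended to regenerate the condition flags.
 */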
static void
constant_operand(struct nv_pc *pc,
                 struct nv_instruction *nvi, struct nv_value *val, int s)
{
   union {
      float f32;
      uint32_t u32;
      int32_t s32;
   } u;
   int t = s ? 0 : 1;
   uint op;
   ubyte type;

   if (!nvi->def[0])
      return;
   type = nvi->def[0]->reg.type;

   u.u32 = val->reg.imm.u32;
   modifiers_apply(&u.u32, type, nvi->src[s]->mod);

   switch (nvi->opcode) {
   case NV_OP_MUL:
      if ((type == NV_TYPE_F32 && u.f32 == 1.0f) ||
          (NV_TYPE_ISINT(type) && u.u32 == 1)) {
         if ((op = modifiers_opcode(nvi->src[t]->mod)) == NV_OP_NOP)
            break;
         nvi->opcode = op;
         nv_reference(pc, &nvi->src[s], NULL);
         nvi->src[0] = nvi->src[t];
         nvi->src[1] = NULL;
      } else
      if ((type == NV_TYPE_F32 && u.f32 == 2.0f) ||
          (NV_TYPE_ISINT(type) && u.u32 == 2)) {
         nvi->opcode = NV_OP_ADD;
         nv_reference(pc, &nvi->src[s], nvi->src[t]->value);
         nvi->src[s]->mod = nvi->src[t]->mod;
      } else
      if (type == NV_TYPE_F32 && u.f32 == -1.0f) {
         if (nvi->src[t]->mod & NV_MOD_NEG)
            nvi->opcode = NV_OP_MOV;
         else
            nvi->opcode = NV_OP_NEG;
         nv_reference(pc, &nvi->src[s], NULL);
         nvi->src[0] = nvi->src[t];
         nvi->src[1] = NULL;
      } else
      if (type == NV_TYPE_F32 && u.f32 == -2.0f) {
         nvi->opcode = NV_OP_ADD;
         nv_reference(pc, &nvi->src[s], nvi->src[t]->value);
         nvi->src[s]->mod = (nvi->src[t]->mod ^= NV_MOD_NEG);
      } else
      if (u.u32 == 0) {
         nvi->opcode = NV_OP_MOV;
         nv_reference(pc, &nvi->src[t], NULL);
         if (s) {
            nvi->src[0] = nvi->src[1];
            nvi->src[1] = NULL;
         }
      }
      break;
   case NV_OP_ADD:
      if (u.u32 == 0) {
         if ((op = modifiers_opcode(nvi->src[t]->mod)) == NV_OP_NOP)
            break;
         nvi->opcode = op;
         nv_reference(pc, &nvi->src[s], NULL);
         nvi->src[0] = nvi->src[t];
         nvi->src[1] = NULL;
      }
      break;
   case NV_OP_RCP:
      u.f32 = 1.0f / u.f32;
      (val = new_value(pc, NV_FILE_IMM, NV_TYPE_F32))->reg.imm.f32 = u.f32;
      nvi->opcode = NV_OP_MOV;
      assert(s == 0);
      nv_reference(pc, &nvi->src[0], val);
      break;
   case NV_OP_RSQ:
      u.f32 = 1.0f / sqrtf(u.f32);
      (val = new_value(pc, NV_FILE_IMM, NV_TYPE_F32))->reg.imm.f32 = u.f32;
      nvi->opcode = NV_OP_MOV;
      assert(s == 0);
      nv_reference(pc, &nvi->src[0], val);
      break;
   default:
      break;
   }

   if (nvi->opcode == NV_OP_MOV && nvi->flags_def) {
      struct nv_instruction *cvt = new_instruction_at(pc, nvi, NV_OP_CVT);

      nv_reference(pc, &cvt->src[0], nvi->def[0]);

      cvt->flags_def = nvi->flags_def;
      nvi->flags_def = NULL;
   }
}
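
/* Constant folding, then MUL+ADD fusion into MAD, e.g.
 *
 *    mul $r1, $r2, $r3              mad $r0, $r2, $r3, $r4
 *    add $r0, $r1, $r4        -->
 *
 * applied only when the product has a single use and both ADD sources
 * are GPRs (constant folding above may have introduced an immediate).
 */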
static int
nv_pass_lower_arith(struct nv_pass *ctx, struct nv_basic_block *b)
{
   struct nv_instruction *nvi, *next;
   int j;

   for (nvi = b->entry; nvi; nvi = next) {
      struct nv_value *src0, *src1, *src;
      int mod;

      next = nvi->next;

      src0 = nvcg_find_immediate(nvi->src[0]);
      src1 = nvcg_find_immediate(nvi->src[1]);

      if (src0 && src1)
         constant_expression(ctx->pc, nvi, src0, src1);
      else {
         if (src0)
            constant_operand(ctx->pc, nvi, src0, 0);
         else
         if (src1)
            constant_operand(ctx->pc, nvi, src1, 1);
      }

      /* try to combine MUL, ADD into MAD */
      if (nvi->opcode != NV_OP_ADD)
         continue;

      src0 = nvi->src[0]->value;
      src1 = nvi->src[1]->value;

      if (SRC_IS_MUL(src0) && src0->refc == 1)
         src = src0;
      else
      if (SRC_IS_MUL(src1) && src1->refc == 1)
         src = src1;
      else
         continue;

      /* could have an immediate from above constant_* */
      if (src0->reg.file != NV_FILE_GPR || src1->reg.file != NV_FILE_GPR)
         continue;

      nvi->opcode = NV_OP_MAD;
      mod = nvi->src[(src == src0) ? 0 : 1]->mod;
      nv_reference(ctx->pc, &nvi->src[(src == src0) ? 0 : 1], NULL);
      nvi->src[2] = nvi->src[(src == src0) ? 1 : 0];

      assert(!(mod & ~NV_MOD_NEG));
      nvi->src[0] = new_ref(ctx->pc, src->insn->src[0]->value);
      nvi->src[1] = new_ref(ctx->pc, src->insn->src[1]->value);
      nvi->src[0]->mod = src->insn->src[0]->mod ^ mod;
      nvi->src[1]->mod = src->insn->src[1]->mod;
   }
   DESCEND_ARBITRARY(j, nv_pass_lower_arith);

   return 0;
}

/* TODO: redundant store elimination */

struct load_record {
   struct load_record *next;
   uint64_t data[2];
   struct nv_value *value;
};

#define LOAD_RECORD_POOL_SIZE 1024

struct nv_pass_reld_elim {
   struct nv_pc *pc;

   struct load_record *imm;
   struct load_record *mem_s;
   struct load_record *mem_v;
   struct load_record *mem_c[16];
   struct load_record *mem_l;

   struct load_record pool[LOAD_RECORD_POOL_SIZE];
   int alloc;
};
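
/* Reload elimination proper: keyed by (data[0], data[1]) = (address or
 * immediate bits, indirect address register or ~0), one record list per
 * memory space.  A load matching an existing record is replaced by a
 * reference to the recorded value; the lists are cleared after each
 * block, so reuse happens within a single basic block only.
 */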
/* TODO: properly handle loads from l[] memory in the presence of stores */
static int
nv_pass_reload_elim(struct nv_pass_reld_elim *ctx, struct nv_basic_block *b)
{
   struct load_record **rec, *it;
   struct nv_instruction *ld, *next;
   uint64_t data[2];
   struct nv_value *val;
   int j;

   for (ld = b->entry; ld; ld = next) {
      next = ld->next;
      if (!ld->src[0])
         continue;
      val = ld->src[0]->value;
      rec = NULL;

      if (ld->opcode == NV_OP_LINTERP || ld->opcode == NV_OP_PINTERP) {
         data[0] = val->reg.id;
         data[1] = 0;
         rec = &ctx->mem_v;
      } else
      if (ld->opcode == NV_OP_LDA) {
         data[0] = val->reg.id;
         data[1] = ld->src[4] ? ld->src[4]->value->n : ~0ULL;
         if (val->reg.file >= NV_FILE_MEM_C(0) &&
             val->reg.file <= NV_FILE_MEM_C(15))
            rec = &ctx->mem_c[val->reg.file - NV_FILE_MEM_C(0)];
         else
         if (val->reg.file == NV_FILE_MEM_S)
            rec = &ctx->mem_s;
         else
         if (val->reg.file == NV_FILE_MEM_L)
            rec = &ctx->mem_l;
      } else
      if ((ld->opcode == NV_OP_MOV) && (val->reg.file == NV_FILE_IMM)) {
         data[0] = val->reg.imm.u32;
         data[1] = 0;
         rec = &ctx->imm;
      }

      if (!rec || !ld->def[0]->refc)
         continue;

      for (it = *rec; it; it = it->next)
         if (it->data[0] == data[0] && it->data[1] == data[1])
            break;

      if (it) {
         if (ld->def[0]->reg.id >= 0)
            it->value = ld->def[0];
         else
         if (!ld->fixed)
            nvcg_replace_value(ctx->pc, ld->def[0], it->value);
      } else {
         if (ctx->alloc == LOAD_RECORD_POOL_SIZE)
            continue;
         it = &ctx->pool[ctx->alloc++];
         it->next = *rec;
         it->data[0] = data[0];
         it->data[1] = data[1];
         it->value = ld->def[0];
         *rec = it;
      }
   }

   ctx->imm = NULL;
   ctx->mem_s = NULL;
   ctx->mem_v = NULL;
   for (j = 0; j < 16; ++j)
      ctx->mem_c[j] = NULL;
   ctx->mem_l = NULL;
   ctx->alloc = 0;

   DESCEND_ARBITRARY(j, nv_pass_reload_elim);

   return 0;
}
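
/* Derive each vector (TEX) instruction's write mask from which of its
 * four defs are actually referenced, then compact the live defs to the
 * front of the def array.
 */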
static int
nv_pass_tex_mask(struct nv_pass *ctx, struct nv_basic_block *b)
{
   int i, c, j;

   for (i = 0; i < ctx->pc->num_instructions; ++i) {
      struct nv_instruction *nvi = &ctx->pc->instructions[i];
      struct nv_value *def[4];

      if (!nv_is_vector_op(nvi->opcode))
         continue;
      nvi->tex_mask = 0;

      for (c = 0; c < 4; ++c) {
         if (nvi->def[c]->refc)
            nvi->tex_mask |= 1 << c;
         def[c] = nvi->def[c];
      }

      j = 0;
      for (c = 0; c < 4; ++c)
         if (nvi->tex_mask & (1 << c))
            nvi->def[j++] = def[c];
      for (c = 0; c < 4; ++c)
         if (!(nvi->tex_mask & (1 << c)))
            nvi->def[j++] = def[c];
      assert(j == 4);
   }
   return 0;
}
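
/* Dead code elimination: delete any instruction that inst_cullable()
 * approves (unreferenced, no side effects).  The driver in nv_pc_pass0
 * reruns this until a pass removes nothing, since deleting one
 * instruction can make its operands' producers dead in turn.
 */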
struct nv_pass_dce {
   struct nv_pc *pc;
   uint removed;
};

static int
nv_pass_dce(struct nv_pass_dce *ctx, struct nv_basic_block *b)
{
   int j;
   struct nv_instruction *nvi, *next;

   for (nvi = b->phi ? b->phi : b->entry; nvi; nvi = next) {
      next = nvi->next;

      if (inst_cullable(nvi)) {
         nv_nvi_delete(nvi);

         ++ctx->removed;
      }
   }
   DESCEND_ARBITRARY(j, nv_pass_dce);

   return 0;
}
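
/* The CFG shape being matched (out[1] may also be a dummy ELSE holding
 * only a BREAK edge):
 *
 *            bb
 *           /  \
 *       out[0] out[1]
 *           \  /
 *           join
 */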
/* Register allocation inserted ELSE blocks for all IF/ENDIF without ELSE.
 * Returns TRUE if @bb initiates an IF/ELSE/ENDIF clause, or is an IF with
 * BREAK and dummy ELSE block.
 */
static INLINE boolean
bb_is_if_else_endif(struct nv_basic_block *bb)
{
   if (!bb->out[0] || !bb->out[1])
      return FALSE;

   if (bb->out[0]->out_kind[0] == CFG_EDGE_LOOP_LEAVE) {
      return (bb->out[0]->out[1] == bb->out[1]->out[0] &&
              !bb->out[1]->out[1]);
   } else {
      return (bb->out[0]->out[0] == bb->out[1]->out[0] &&
              !bb->out[0]->out[1] &&
              !bb->out[1]->out[1]);
   }
}

/* predicate instructions and remove branch at the end */
static void
predicate_instructions(struct nv_pc *pc, struct nv_basic_block *b,
                       struct nv_value *p, ubyte cc)
{
   struct nv_instruction *nvi;

   if (!b->entry)
      return;
   for (nvi = b->entry; nvi->next; nvi = nvi->next) {
      if (!nvi_isnop(nvi)) {
         nvi->cc = cc;
         nv_reference(pc, &nvi->flags_src, p);
      }
   }

   if (nvi->opcode == NV_OP_BRA)
      nv_nvi_delete(nvi);
   else
   if (!nvi_isnop(nvi)) {
      nvi->cc = cc;
      nv_reference(pc, &nvi->flags_src, p);
   }
}

/* NOTE: Run this after register allocation, we can just cut out the cflow
 * instructions and hook the predicates to the conditional OPs if they are
 * not using immediates; better than inserting SELECT to join definitions.
 *
 * NOTE: Should adapt prior optimization to make this possible more often.
 */
static int
nv_pass_flatten(struct nv_pass *ctx, struct nv_basic_block *b)
{
   struct nv_instruction *nvi;
   struct nv_value *pred;
   int i;
   int n0 = 0, n1 = 0;

   if (bb_is_if_else_endif(b)) {

      NV50_DBGMSG(PROG_IR,
                  "pass_flatten: IF/ELSE/ENDIF construct at BB:%i\n", b->id);

      for (n0 = 0, nvi = b->out[0]->entry; nvi; nvi = nvi->next, ++n0)
         if (!nv50_nvi_can_predicate(nvi))
            break;
      if (!nvi) {
         for (n1 = 0, nvi = b->out[1]->entry; nvi; nvi = nvi->next, ++n1)
            if (!nv50_nvi_can_predicate(nvi))
               break;
#if NV50_DEBUG & NV50_DEBUG_PROG_IR
         if (nvi) {
            debug_printf("cannot predicate: "); nv_print_instruction(nvi);
         }
      } else {
         debug_printf("cannot predicate: "); nv_print_instruction(nvi);
#endif
      }

      if (!nvi && n0 < 12 && n1 < 12) { /* 12 as arbitrary limit */
         assert(b->exit && b->exit->flags_src);
         pred = b->exit->flags_src->value;

         predicate_instructions(ctx->pc, b->out[0], pred, NV_CC_NE | NV_CC_U);
         predicate_instructions(ctx->pc, b->out[1], pred, NV_CC_EQ);

         assert(b->exit && b->exit->opcode == NV_OP_BRA);
         nv_nvi_delete(b->exit);

         if (b->exit && b->exit->opcode == NV_OP_JOINAT)
            nv_nvi_delete(b->exit);

         i = (b->out[0]->out_kind[0] == CFG_EDGE_LOOP_LEAVE) ? 1 : 0;

         if ((nvi = b->out[0]->out[i]->entry)) {
            nvi->is_join = 0;
            if (nvi->opcode == NV_OP_JOIN)
               nv_nvi_delete(nvi);
         }
      }
   }
   DESCEND_ARBITRARY(i, nv_pass_flatten);

   return 0;
}
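
/* Instructions are merged when opcode, source modifiers and sources
 * match (same value, or same assigned register); loads, stores, moves,
 * vector ops and anything touching flags or address registers are left
 * alone.  The later instruction is deleted and its uses redirected to
 * the earlier result.
 */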
/* local common subexpression elimination, stupid O(n^2) implementation */
static int
nv_pass_cse(struct nv_pass *ctx, struct nv_basic_block *b)
{
   struct nv_instruction *ir, *ik, *next;
   struct nv_instruction *entry = b->phi ? b->phi : b->entry;
   int s;
   unsigned int reps;

   do {
      reps = 0;
      for (ir = entry; ir; ir = next) {
         next = ir->next;
         for (ik = entry; ik != ir; ik = ik->next) {
            if (ir->opcode != ik->opcode || ir->fixed)
               continue;

            if (!ir->def[0] || !ik->def[0] ||
                ik->opcode == NV_OP_LDA ||
                ik->opcode == NV_OP_STA ||
                ik->opcode == NV_OP_MOV ||
                nv_is_vector_op(ik->opcode))
               continue; /* ignore loads, stores & moves */

            if (ik->src[4] || ir->src[4])
               continue; /* don't mess with address registers */

            if (ik->flags_src || ir->flags_src ||
                ik->flags_def || ir->flags_def)
               continue; /* and also not with flags, for now */

            if (ik->def[0]->reg.file == NV_FILE_OUT ||
                ir->def[0]->reg.file == NV_FILE_OUT ||
                !values_equal(ik->def[0], ir->def[0]))
               continue;

            for (s = 0; s < 3; ++s) {
               struct nv_value *a, *b;

               if (!ik->src[s]) {
                  if (ir->src[s])
                     break;
                  continue;
               }
               if (ik->src[s]->mod != ir->src[s]->mod)
                  break;
               a = ik->src[s]->value;
               b = ir->src[s]->value;
               if (a == b)
                  continue;
               if (a->reg.file != b->reg.file ||
                   a->reg.id < 0 ||
                   a->reg.id != b->reg.id)
                  break;
            }
            if (s == 3) {
               nv_nvi_delete(ir);
               ++reps;
               nvcg_replace_value(ctx->pc, ir->def[0], ik->def[0]);
               break;
            }
         }
      }
   } while(reps);

   DESCEND_ARBITRARY(s, nv_pass_cse);

   return 0;
}
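
/* Pass ordering: lower arithmetic first (so later passes need not worry
 * whether a source is a foldable immediate), then lower modifiers, fold
 * loads and stores, optionally eliminate redundant loads, run CSE, run
 * DCE to a fixed point, and finally compute TEX masks.
 */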
static int
nv_pc_pass0(struct nv_pc *pc, struct nv_basic_block *root)
{
   struct nv_pass_reld_elim *reldelim;
   struct nv_pass pass;
   struct nv_pass_dce dce;
   int ret;

   pass.n = 0;
   pass.pc = pc;

   /* Do this first, so we don't have to pay attention
    * to whether sources are supported memory loads.
    */
   pc->pass_seq++;
   ret = nv_pass_lower_arith(&pass, root);
   if (ret)
      return ret;

   pc->pass_seq++;
   ret = nv_pass_lower_mods(&pass, root);
   if (ret)
      return ret;

   pc->pass_seq++;
   ret = nv_pass_fold_loads(&pass, root);
   if (ret)
      return ret;

   pc->pass_seq++;
   ret = nv_pass_fold_stores(&pass, root);
   if (ret)
      return ret;

   if (pc->opt_reload_elim) {
      reldelim = CALLOC_STRUCT(nv_pass_reld_elim);
      reldelim->pc = pc;
      pc->pass_seq++;
      ret = nv_pass_reload_elim(reldelim, root);
      FREE(reldelim);
      if (ret)
         return ret;
   }

   pc->pass_seq++;
   ret = nv_pass_cse(&pass, root);
   if (ret)
      return ret;

   dce.pc = pc;
   do {
      dce.removed = 0;
      pc->pass_seq++;
      ret = nv_pass_dce(&dce, root);
      if (ret)
         return ret;
   } while (dce.removed);

   ret = nv_pass_tex_mask(&pass, root);
   if (ret)
      return ret;

   return ret;
}

int
nv_pc_exec_pass0(struct nv_pc *pc)
{
   int i, ret;

   for (i = 0; i < pc->num_subroutines + 1; ++i)
      if (pc->root[i] && (ret = nv_pc_pass0(pc, pc->root[i])))
         return ret;
   return 0;
}