#include "pipe/p_context.h"
#include "pipe/p_defines.h"
#include "pipe/p_state.h"
#include "util/u_linkage.h"
#include "util/u_debug.h"

#include "pipe/p_shader_tokens.h"
#include "tgsi/tgsi_parse.h"
#include "tgsi/tgsi_dump.h"
#include "tgsi/tgsi_util.h"
#include "tgsi/tgsi_ureg.h"

#include "draw/draw_context.h"

#include "nvfx_context.h"
#include "nvfx_state.h"
#include "nvfx_resource.h"

/* TODO (at least...):
 * 1. Indexed consts + ARL
 * 3. NV_vp11, NV_vp2, NV_vp3 features
 *    - extra arith opcodes?
 */

#include "nv30_vertprog.h"
#include "nv40_vertprog.h"

struct nvfx_loop_entry {
	unsigned brk_target;
	unsigned cont_target;
};

struct nvfx_vpc {
	struct nvfx_context* nvfx;
	struct pipe_shader_state pipe;
	struct nvfx_vertex_program *vp;
	struct tgsi_shader_info* info;

	struct nvfx_vertex_program_exec *vpi;

	unsigned r_temps;
	unsigned r_temps_discard;
	struct nvfx_reg r_result[PIPE_MAX_SHADER_OUTPUTS];
	struct nvfx_reg *r_address;
	struct nvfx_reg *r_temp;
	struct nvfx_reg *r_const;
	struct nvfx_reg r_0_1;

	struct nvfx_reg *imm;
	unsigned nr_imm;

	unsigned hpos_idx;

	struct util_dynarray label_relocs;
	struct util_dynarray loop_stack;
};

static struct nvfx_reg
temp(struct nvfx_vpc *vpc)
{
	int idx = ffs(~vpc->r_temps) - 1;

	if (idx < 0) {
		NOUVEAU_ERR("out of temps!!\n");
		assert(0);
		return nvfx_reg(NVFXSR_TEMP, 0);
	}

	vpc->r_temps |= (1 << idx);
	vpc->r_temps_discard |= (1 << idx);
	return nvfx_reg(NVFXSR_TEMP, idx);
}
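
/* Temporaries are allocated from the r_temps bitmask: ffs(~r_temps) picks the
 * lowest free register, and r_temps_discard marks registers to hand back after
 * the current TGSI instruction via release_temps() below.
 */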
static void
release_temps(struct nvfx_vpc *vpc)
{
	vpc->r_temps &= ~vpc->r_temps_discard;
	vpc->r_temps_discard = 0;
}
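
/* constant() interns one vec4 constant slot per program: a non-negative "pipe"
 * index refers to an element of the bound constant buffer (reused if already
 * present), while pipe == -1 allocates an anonymous slot holding the immediate
 * x/y/z/w values.
 */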
static struct nvfx_reg
constant(struct nvfx_vpc *vpc, int pipe, float x, float y, float z, float w)
{
	struct nvfx_vertex_program *vp = vpc->vp;
	struct nvfx_vertex_program_data *vpd;
	int idx;

	if (pipe >= 0) {
		for (idx = 0; idx < vp->nr_consts; idx++) {
			if (vp->consts[idx].index == pipe)
				return nvfx_reg(NVFXSR_CONST, idx);
		}
	}

	idx = vp->nr_consts++;
	vp->consts = realloc(vp->consts, sizeof(*vpd) * vp->nr_consts);
	vpd = &vp->consts[idx];
	vpd->index = pipe;
	vpd->value[0] = x;
	vpd->value[1] = y;
	vpd->value[2] = z;
	vpd->value[3] = w;
	return nvfx_reg(NVFXSR_CONST, idx);
}

#define arith(s,t,o,d,m,s0,s1,s2) \
	nvfx_insn((s), (NVFX_VP_INST_SLOT_##t << 7) | NVFX_VP_INST_##t##_OP_##o, -1, (d), (m), (s0), (s1), (s2))
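
/* The functional unit (VEC or SCA) and the opcode are packed into one integer:
 * unit in bits 7 and up, opcode in the low 7 bits; nvfx_vp_emit() below
 * unpacks them again. For example,
 *	arith(0, VEC, ADD, dst, mask, a, none, b)
 * expands to
 *	nvfx_insn(0, (NVFX_VP_INST_SLOT_VEC << 7) | NVFX_VP_INST_VEC_OP_ADD,
 *	          -1, dst, mask, a, none, b)
 */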
static void
emit_src(struct nvfx_context* nvfx, struct nvfx_vpc *vpc, uint32_t *hw, int pos, struct nvfx_src src)
{
	struct nvfx_vertex_program *vp = vpc->vp;
	uint32_t sr = 0;
	struct nvfx_relocation reloc;

	switch (src.reg.type) {
	case NVFXSR_TEMP:
		sr |= (NVFX_VP(SRC_REG_TYPE_TEMP) << NVFX_VP(SRC_REG_TYPE_SHIFT));
		sr |= (src.reg.index << NVFX_VP(SRC_TEMP_SRC_SHIFT));
		break;
	case NVFXSR_INPUT:
		sr |= (NVFX_VP(SRC_REG_TYPE_INPUT) <<
		       NVFX_VP(SRC_REG_TYPE_SHIFT));
		vp->ir |= (1 << src.reg.index);
		hw[1] |= (src.reg.index << NVFX_VP(INST_INPUT_SRC_SHIFT));
		break;
	case NVFXSR_CONST:
		sr |= (NVFX_VP(SRC_REG_TYPE_CONST) <<
		       NVFX_VP(SRC_REG_TYPE_SHIFT));
		reloc.location = vp->nr_insns - 1;
		reloc.target = src.reg.index;
		util_dynarray_append(&vp->const_relocs, struct nvfx_relocation, reloc);
		break;
	case NVFXSR_NONE:
		sr |= (NVFX_VP(SRC_REG_TYPE_INPUT) <<
		       NVFX_VP(SRC_REG_TYPE_SHIFT));
		break;
	default:
		assert(0);
	}

	if (src.negate)
		sr |= NVFX_VP(SRC_NEGATE);

	if (src.abs)
		hw[0] |= (1 << (21 + pos));

	sr |= ((src.swz[0] << NVFX_VP(SRC_SWZ_X_SHIFT)) |
	       (src.swz[1] << NVFX_VP(SRC_SWZ_Y_SHIFT)) |
	       (src.swz[2] << NVFX_VP(SRC_SWZ_Z_SHIFT)) |
	       (src.swz[3] << NVFX_VP(SRC_SWZ_W_SHIFT)));

	if(src.indirect) {
		if(src.reg.type == NVFXSR_CONST)
			hw[3] |= NVFX_VP(INST_INDEX_CONST);
		else if(src.reg.type == NVFXSR_INPUT)
			hw[0] |= NVFX_VP(INST_INDEX_INPUT);
		else
			assert(0);

		if(src.indirect_reg)
			hw[0] |= NVFX_VP(INST_ADDR_REG_SELECT_1);
		hw[0] |= src.indirect_swz << NVFX_VP(INST_ADDR_SWZ_SHIFT);
	}

	switch (pos) {
	case 0:
		hw[1] |= ((sr & NVFX_VP(SRC0_HIGH_MASK)) >>
			  NVFX_VP(SRC0_HIGH_SHIFT)) << NVFX_VP(INST_SRC0H_SHIFT);
		hw[2] |= (sr & NVFX_VP(SRC0_LOW_MASK)) <<
			  NVFX_VP(INST_SRC0L_SHIFT);
		break;
	case 1:
		hw[2] |= sr << NVFX_VP(INST_SRC1_SHIFT);
		break;
	case 2:
		hw[2] |= ((sr & NVFX_VP(SRC2_HIGH_MASK)) >>
			  NVFX_VP(SRC2_HIGH_SHIFT)) << NVFX_VP(INST_SRC2H_SHIFT);
		hw[3] |= (sr & NVFX_VP(SRC2_LOW_MASK)) <<
			  NVFX_VP(INST_SRC2L_SHIFT);
		break;
	default:
		assert(0);
	}
}
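
/* Destination encoding differs between nv30 and nv40. On nv4x the nv30-style
 * clip-distance destinations used by the common code are remapped below onto
 * the FOGC/PSZ registers, and vp->or accumulates which result registers the
 * program actually writes.
 */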
static void
emit_dst(struct nvfx_context* nvfx, struct nvfx_vpc *vpc, uint32_t *hw, int slot, struct nvfx_reg dst)
{
	struct nvfx_vertex_program *vp = vpc->vp;

	switch (dst.type) {
	case NVFXSR_NONE:
		if(!nvfx->is_nv4x)
			hw[0] |= NV30_VP_INST_DEST_TEMP_ID_MASK;
		else {
			hw[3] |= NV40_VP_INST_DEST_MASK;
			if (slot == 0)
				hw[0] |= NV40_VP_INST_VEC_DEST_TEMP_MASK;
			else
				hw[3] |= NV40_VP_INST_SCA_DEST_TEMP_MASK;
		}
		break;
	case NVFXSR_TEMP:
		if(!nvfx->is_nv4x)
			hw[0] |= (dst.index << NV30_VP_INST_DEST_TEMP_ID_SHIFT);
		else {
			hw[3] |= NV40_VP_INST_DEST_MASK;
			if (slot == 0)
				hw[0] |= (dst.index << NV40_VP_INST_VEC_DEST_TEMP_SHIFT);
			else
				hw[3] |= (dst.index << NV40_VP_INST_SCA_DEST_TEMP_SHIFT);
		}
		break;
	case NVFXSR_OUTPUT:
		/* TODO: this may be wrong because on nv30 COL0 and BFC0 are swapped */
		if(nvfx->is_nv4x) {
			switch (dst.index) {
			case NV30_VP_INST_DEST_CLP(0):
				dst.index = NVFX_VP(INST_DEST_FOGC);
				break;
			case NV30_VP_INST_DEST_CLP(1):
				dst.index = NVFX_VP(INST_DEST_FOGC);
				break;
			case NV30_VP_INST_DEST_CLP(2):
				dst.index = NVFX_VP(INST_DEST_FOGC);
				break;
			case NV30_VP_INST_DEST_CLP(3):
				dst.index = NVFX_VP(INST_DEST_PSZ);
				break;
			case NV30_VP_INST_DEST_CLP(4):
				dst.index = NVFX_VP(INST_DEST_PSZ);
				break;
			case NV30_VP_INST_DEST_CLP(5):
				dst.index = NVFX_VP(INST_DEST_PSZ);
				break;
			case NV40_VP_INST_DEST_COL0: vp->or |= (1 << 0); break;
			case NV40_VP_INST_DEST_COL1: vp->or |= (1 << 1); break;
			case NV40_VP_INST_DEST_BFC0: vp->or |= (1 << 2); break;
			case NV40_VP_INST_DEST_BFC1: vp->or |= (1 << 3); break;
			case NV40_VP_INST_DEST_FOGC: vp->or |= (1 << 4); break;
			case NV40_VP_INST_DEST_PSZ: vp->or |= (1 << 5); break;
			}
		}

		if(!nvfx->is_nv4x) {
			hw[3] |= (dst.index << NV30_VP_INST_DEST_SHIFT);
			hw[0] |= NV30_VP_INST_VEC_DEST_TEMP_MASK;

			/*XXX: no way this is entirely correct, someone needs to
			 * figure out what exactly it is.
			 */
			hw[3] |= 0x800;
		} else {
			hw[3] |= (dst.index << NV40_VP_INST_DEST_SHIFT);
			if (slot == 0) {
				hw[0] |= NV40_VP_INST_VEC_RESULT;
				hw[0] |= NV40_VP_INST_VEC_DEST_TEMP_MASK;
			} else {
				hw[3] |= NV40_VP_INST_SCA_RESULT;
				hw[3] |= NV40_VP_INST_SCA_DEST_TEMP_MASK;
			}
		}
		break;
	}
}
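
/* Each hardware instruction is four 32-bit words (vpc->vpi->data). The helpers
 * above OR the destination and up to three source descriptors into those
 * words; nvfx_vp_emit() below appends a fresh instruction and fills in the
 * condition-code, opcode and write-mask fields before delegating to them.
 */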
static void
nvfx_vp_emit(struct nvfx_vpc *vpc, struct nvfx_insn insn)
{
	struct nvfx_context* nvfx = vpc->nvfx;
	struct nvfx_vertex_program *vp = vpc->vp;
	unsigned slot = insn.op >> 7;
	unsigned op = insn.op & 0x7f;
	uint32_t *hw;

	vp->insns = realloc(vp->insns, ++vp->nr_insns * sizeof(*vpc->vpi));
	vpc->vpi = &vp->insns[vp->nr_insns - 1];
	memset(vpc->vpi, 0, sizeof(*vpc->vpi));

	hw = vpc->vpi->data;

	hw[0] |= (insn.cc_test << NVFX_VP(INST_COND_SHIFT));
	hw[0] |= ((insn.cc_swz[0] << NVFX_VP(INST_COND_SWZ_X_SHIFT)) |
		  (insn.cc_swz[1] << NVFX_VP(INST_COND_SWZ_Y_SHIFT)) |
		  (insn.cc_swz[2] << NVFX_VP(INST_COND_SWZ_Z_SHIFT)) |
		  (insn.cc_swz[3] << NVFX_VP(INST_COND_SWZ_W_SHIFT)));
	if(insn.cc_update)
		hw[0] |= NVFX_VP(INST_COND_UPDATE_ENABLE);

	if(insn.sat) {
		assert(nvfx->use_nv4x);
		if(nvfx->use_nv4x)
			hw[0] |= NV40_VP_INST_SATURATE;
	}

	if(!nvfx->is_nv4x) {
		if(slot == 0)
			hw[1] |= (op << NV30_VP_INST_VEC_OPCODE_SHIFT);
		else {
			hw[0] |= ((op >> 4) << NV30_VP_INST_SCA_OPCODEH_SHIFT);
			hw[1] |= ((op & 0xf) << NV30_VP_INST_SCA_OPCODEL_SHIFT);
		}
//		hw[3] |= NVFX_VP(INST_SCA_DEST_TEMP_MASK);
//		hw[3] |= (mask << NVFX_VP(INST_VEC_WRITEMASK_SHIFT));

		if (insn.dst.type == NVFXSR_OUTPUT) {
			if (slot)
				hw[3] |= (insn.mask << NV30_VP_INST_SDEST_WRITEMASK_SHIFT);
			else
				hw[3] |= (insn.mask << NV30_VP_INST_VDEST_WRITEMASK_SHIFT);
		} else {
			if (slot)
				hw[3] |= (insn.mask << NV30_VP_INST_STEMP_WRITEMASK_SHIFT);
			else
				hw[3] |= (insn.mask << NV30_VP_INST_VTEMP_WRITEMASK_SHIFT);
		}
	} else {
		if (slot == 0) {
			hw[1] |= (op << NV40_VP_INST_VEC_OPCODE_SHIFT);
			hw[3] |= NV40_VP_INST_SCA_DEST_TEMP_MASK;
			hw[3] |= (insn.mask << NV40_VP_INST_VEC_WRITEMASK_SHIFT);
		} else {
			hw[1] |= (op << NV40_VP_INST_SCA_OPCODE_SHIFT);
			hw[0] |= NV40_VP_INST_VEC_DEST_TEMP_MASK;
			hw[3] |= (insn.mask << NV40_VP_INST_SCA_WRITEMASK_SHIFT);
		}
	}

	emit_dst(nvfx, vpc, hw, slot, insn.dst);
	emit_src(nvfx, vpc, hw, 0, insn.src[0]);
	emit_src(nvfx, vpc, hw, 1, insn.src[1]);
	emit_src(nvfx, vpc, hw, 2, insn.src[2]);

//	if(insn.src[0].indirect || op == NVFX_VP_INST_VEC_OP_ARL)
//		hw[3] |= NV40_VP_INST_SCA_RESULT;
}
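
/* tgsi_src/tgsi_dst map TGSI register files onto the registers allocated in
 * nvfx_vertprog_prepare(). A source with reg.type < 0 marks an operand the
 * hardware cannot encode (e.g. relative addressing of anything other than
 * constants or inputs), which makes the instruction fail translation.
 */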
static inline struct nvfx_src
tgsi_src(struct nvfx_vpc *vpc, const struct tgsi_full_src_register *fsrc) {
	struct nvfx_src src;

	switch (fsrc->Register.File) {
	case TGSI_FILE_INPUT:
		src.reg = nvfx_reg(NVFXSR_INPUT, fsrc->Register.Index);
		break;
	case TGSI_FILE_CONSTANT:
		src.reg = vpc->r_const[fsrc->Register.Index];
		break;
	case TGSI_FILE_IMMEDIATE:
		src.reg = vpc->imm[fsrc->Register.Index];
		break;
	case TGSI_FILE_TEMPORARY:
		src.reg = vpc->r_temp[fsrc->Register.Index];
		break;
	default:
		NOUVEAU_ERR("bad src file\n");
		src.reg.index = 0;
		src.reg.type = -1;
		break;
	}

	src.abs = fsrc->Register.Absolute;
	src.negate = fsrc->Register.Negate;
	src.swz[0] = fsrc->Register.SwizzleX;
	src.swz[1] = fsrc->Register.SwizzleY;
	src.swz[2] = fsrc->Register.SwizzleZ;
	src.swz[3] = fsrc->Register.SwizzleW;
	src.indirect = 0;
	src.indirect_reg = 0;
	src.indirect_swz = 0;

	if(fsrc->Register.Indirect) {
		if(fsrc->Indirect.File == TGSI_FILE_ADDRESS &&
		   (fsrc->Register.File == TGSI_FILE_CONSTANT ||
		    fsrc->Register.File == TGSI_FILE_INPUT))
		{
			src.indirect = 1;
			src.indirect_reg = fsrc->Indirect.Index;
			src.indirect_swz = fsrc->Indirect.SwizzleX;
		}
		else
		{
			src.reg.index = 0;
			src.reg.type = -1;
		}
	}

	return src;
}

static INLINE struct nvfx_reg
tgsi_dst(struct nvfx_vpc *vpc, const struct tgsi_full_dst_register *fdst) {
	struct nvfx_reg dst;

	switch (fdst->Register.File) {
	case TGSI_FILE_NULL:
		dst = nvfx_reg(NVFXSR_NONE, 0);
		break;
	case TGSI_FILE_OUTPUT:
		dst = vpc->r_result[fdst->Register.Index];
		break;
	case TGSI_FILE_TEMPORARY:
		dst = vpc->r_temp[fdst->Register.Index];
		break;
	case TGSI_FILE_ADDRESS:
		dst = vpc->r_address[fdst->Register.Index];
		break;
	default:
		NOUVEAU_ERR("bad dst file %i\n", fdst->Register.File);
		dst = nvfx_reg(NVFXSR_NONE, 0);
		break;
	}

	return dst;
}

static uint32_t
tgsi_mask(uint tgsi)
{
	uint32_t mask = 0;

	if (tgsi & TGSI_WRITEMASK_X) mask |= NVFX_VP_MASK_X;
	if (tgsi & TGSI_WRITEMASK_Y) mask |= NVFX_VP_MASK_Y;
	if (tgsi & TGSI_WRITEMASK_Z) mask |= NVFX_VP_MASK_Z;
	if (tgsi & TGSI_WRITEMASK_W) mask |= NVFX_VP_MASK_W;
	return mask;
}

static boolean
nvfx_vertprog_parse_instruction(struct nvfx_context* nvfx, struct nvfx_vpc *vpc,
				unsigned idx, const struct tgsi_full_instruction *finst)
{
	struct nvfx_src src[3], tmp;
	struct nvfx_reg dst;
	struct nvfx_reg final_dst;
	struct nvfx_src none = nvfx_src(nvfx_reg(NVFXSR_NONE, 0));
	struct nvfx_insn insn;
	struct nvfx_relocation reloc;
	struct nvfx_loop_entry loop;
	boolean sat = FALSE;
	int mask;
	int ai = -1, ci = -1, ii = -1;
	int i;
	unsigned sub_depth = 0;

	for (i = 0; i < finst->Instruction.NumSrcRegs; i++) {
		const struct tgsi_full_src_register *fsrc;

		fsrc = &finst->Src[i];
		if (fsrc->Register.File == TGSI_FILE_TEMPORARY) {
			src[i] = tgsi_src(vpc, fsrc);
		}
	}
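
	/* The hardware can fetch at most one distinct input attribute and one
	 * distinct constant/immediate slot per instruction. ai/ci/ii track the
	 * single index of each file already in use; any further operand from
	 * the same file is first copied into a temporary with a MOV.
	 */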
	for (i = 0; i < finst->Instruction.NumSrcRegs; i++) {
		const struct tgsi_full_src_register *fsrc;

		fsrc = &finst->Src[i];

		switch (fsrc->Register.File) {
		case TGSI_FILE_INPUT:
			if (ai == -1 || ai == fsrc->Register.Index) {
				ai = fsrc->Register.Index;
				src[i] = tgsi_src(vpc, fsrc);
			} else {
				src[i] = nvfx_src(temp(vpc));
				nvfx_vp_emit(vpc, arith(0, VEC, MOV, src[i].reg, NVFX_VP_MASK_ALL, tgsi_src(vpc, fsrc), none, none));
			}
			break;
		case TGSI_FILE_CONSTANT:
			if ((ci == -1 && ii == -1) ||
			    ci == fsrc->Register.Index) {
				ci = fsrc->Register.Index;
				src[i] = tgsi_src(vpc, fsrc);
			} else {
				src[i] = nvfx_src(temp(vpc));
				nvfx_vp_emit(vpc, arith(0, VEC, MOV, src[i].reg, NVFX_VP_MASK_ALL, tgsi_src(vpc, fsrc), none, none));
			}
			break;
		case TGSI_FILE_IMMEDIATE:
			if ((ci == -1 && ii == -1) ||
			    ii == fsrc->Register.Index) {
				ii = fsrc->Register.Index;
				src[i] = tgsi_src(vpc, fsrc);
			} else {
				src[i] = nvfx_src(temp(vpc));
				nvfx_vp_emit(vpc, arith(0, VEC, MOV, src[i].reg, NVFX_VP_MASK_ALL, tgsi_src(vpc, fsrc), none, none));
			}
			break;
		case TGSI_FILE_TEMPORARY:
			/* handled above */
			break;
		default:
			NOUVEAU_ERR("bad src file\n");
			return FALSE;
		}
	}

	for (i = 0; i < finst->Instruction.NumSrcRegs; i++) {
		if(src[i].reg.type < 0)
			return FALSE;
	}

	if(finst->Dst[0].Register.File == TGSI_FILE_ADDRESS &&
	   finst->Instruction.Opcode != TGSI_OPCODE_ARL)
		return FALSE;

	final_dst = dst = tgsi_dst(vpc, &finst->Dst[0]);
	mask = tgsi_mask(finst->Dst[0].Register.WriteMask);
	if(finst->Instruction.Saturate == TGSI_SAT_ZERO_ONE)
	{
		assert(finst->Instruction.Opcode != TGSI_OPCODE_ARL);
		if(nvfx->use_nv4x)
			sat = TRUE;
		else if(dst.type != NVFXSR_TEMP)
			dst = temp(vpc);
	}

	switch (finst->Instruction.Opcode) {
	case TGSI_OPCODE_ABS:
		nvfx_vp_emit(vpc, arith(sat, VEC, MOV, dst, mask, abs(src[0]), none, none));
		break;
	case TGSI_OPCODE_ADD:
		nvfx_vp_emit(vpc, arith(sat, VEC, ADD, dst, mask, src[0], none, src[1]));
		break;
	case TGSI_OPCODE_ARL:
		nvfx_vp_emit(vpc, arith(0, VEC, ARL, dst, mask, src[0], none, none));
		break;
	case TGSI_OPCODE_CMP:
		insn = arith(0, VEC, MOV, none.reg, mask, src[0], none, none);
		insn.cc_update = 1;
		nvfx_vp_emit(vpc, insn);

		insn = arith(sat, VEC, MOV, dst, mask, src[2], none, none);
		insn.cc_test = NVFX_COND_GE;
		nvfx_vp_emit(vpc, insn);

		insn = arith(sat, VEC, MOV, dst, mask, src[1], none, none);
		insn.cc_test = NVFX_COND_LT;
		nvfx_vp_emit(vpc, insn);
		break;
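
	/* There is no native CMP: the MOV to none.reg above only primes the
	 * condition register with src0, and the two predicated MOVs then write
	 * src2 where src0 >= 0 and src1 where src0 < 0, per component.
	 */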
	case TGSI_OPCODE_COS:
		nvfx_vp_emit(vpc, arith(sat, SCA, COS, dst, mask, none, none, src[0]));
		break;
	case TGSI_OPCODE_DP2:
		tmp = nvfx_src(temp(vpc));
		nvfx_vp_emit(vpc, arith(0, VEC, MUL, tmp.reg, NVFX_VP_MASK_X | NVFX_VP_MASK_Y, src[0], src[1], none));
		nvfx_vp_emit(vpc, arith(sat, VEC, ADD, dst, mask, swz(tmp, X, X, X, X), none, swz(tmp, Y, Y, Y, Y)));
		break;
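
	/* DP2 has no direct hardware equivalent: multiply the x/y components
	 * into a temp, then add the broadcast X and Y lanes to form the 2D dot
	 * product.
	 */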
	case TGSI_OPCODE_DP3:
		nvfx_vp_emit(vpc, arith(sat, VEC, DP3, dst, mask, src[0], src[1], none));
		break;
	case TGSI_OPCODE_DP4:
		nvfx_vp_emit(vpc, arith(sat, VEC, DP4, dst, mask, src[0], src[1], none));
		break;
	case TGSI_OPCODE_DPH:
		nvfx_vp_emit(vpc, arith(sat, VEC, DPH, dst, mask, src[0], src[1], none));
		break;
	case TGSI_OPCODE_DST:
		nvfx_vp_emit(vpc, arith(sat, VEC, DST, dst, mask, src[0], src[1], none));
		break;
	case TGSI_OPCODE_EX2:
		nvfx_vp_emit(vpc, arith(sat, SCA, EX2, dst, mask, none, none, src[0]));
		break;
	case TGSI_OPCODE_EXP:
		nvfx_vp_emit(vpc, arith(sat, SCA, EXP, dst, mask, none, none, src[0]));
		break;
	case TGSI_OPCODE_FLR:
		nvfx_vp_emit(vpc, arith(sat, VEC, FLR, dst, mask, src[0], none, none));
		break;
	case TGSI_OPCODE_FRC:
		nvfx_vp_emit(vpc, arith(sat, VEC, FRC, dst, mask, src[0], none, none));
		break;
	case TGSI_OPCODE_LG2:
		nvfx_vp_emit(vpc, arith(sat, SCA, LG2, dst, mask, none, none, src[0]));
		break;
	case TGSI_OPCODE_LIT:
		nvfx_vp_emit(vpc, arith(sat, SCA, LIT, dst, mask, none, none, src[0]));
		break;
	case TGSI_OPCODE_LOG:
		nvfx_vp_emit(vpc, arith(sat, SCA, LOG, dst, mask, none, none, src[0]));
		break;
	case TGSI_OPCODE_LRP:
		tmp = nvfx_src(temp(vpc));
		nvfx_vp_emit(vpc, arith(0, VEC, MAD, tmp.reg, mask, neg(src[0]), src[2], src[2]));
		nvfx_vp_emit(vpc, arith(sat, VEC, MAD, dst, mask, src[0], src[1], tmp));
		break;
	case TGSI_OPCODE_MAD:
		nvfx_vp_emit(vpc, arith(sat, VEC, MAD, dst, mask, src[0], src[1], src[2]));
		break;
	case TGSI_OPCODE_MAX:
		nvfx_vp_emit(vpc, arith(sat, VEC, MAX, dst, mask, src[0], src[1], none));
		break;
	case TGSI_OPCODE_MIN:
		nvfx_vp_emit(vpc, arith(sat, VEC, MIN, dst, mask, src[0], src[1], none));
		break;
	case TGSI_OPCODE_MOV:
		nvfx_vp_emit(vpc, arith(sat, VEC, MOV, dst, mask, src[0], none, none));
		break;
	case TGSI_OPCODE_MUL:
		nvfx_vp_emit(vpc, arith(sat, VEC, MUL, dst, mask, src[0], src[1], none));
		break;
	case TGSI_OPCODE_NOP:
		break;
	case TGSI_OPCODE_POW:
		tmp = nvfx_src(temp(vpc));
		nvfx_vp_emit(vpc, arith(0, SCA, LG2, tmp.reg, NVFX_VP_MASK_X, none, none, swz(src[0], X, X, X, X)));
		nvfx_vp_emit(vpc, arith(0, VEC, MUL, tmp.reg, NVFX_VP_MASK_X, swz(tmp, X, X, X, X), swz(src[1], X, X, X, X), none));
		nvfx_vp_emit(vpc, arith(sat, SCA, EX2, dst, mask, none, none, swz(tmp, X, X, X, X)));
		break;
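
	/* POW is synthesized as exp2(log2(x) * y) on the scalar unit; only the
	 * X lane of the temp is used, with the result broadcast into the
	 * destination write mask.
	 */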
	case TGSI_OPCODE_RCP:
		nvfx_vp_emit(vpc, arith(sat, SCA, RCP, dst, mask, none, none, src[0]));
		break;
	case TGSI_OPCODE_RSQ:
		nvfx_vp_emit(vpc, arith(sat, SCA, RSQ, dst, mask, none, none, abs(src[0])));
		break;
	case TGSI_OPCODE_SEQ:
		nvfx_vp_emit(vpc, arith(sat, VEC, SEQ, dst, mask, src[0], src[1], none));
		break;
	case TGSI_OPCODE_SFL:
		nvfx_vp_emit(vpc, arith(sat, VEC, SFL, dst, mask, src[0], src[1], none));
		break;
	case TGSI_OPCODE_SGE:
		nvfx_vp_emit(vpc, arith(sat, VEC, SGE, dst, mask, src[0], src[1], none));
		break;
	case TGSI_OPCODE_SGT:
		nvfx_vp_emit(vpc, arith(sat, VEC, SGT, dst, mask, src[0], src[1], none));
		break;
	case TGSI_OPCODE_SIN:
		nvfx_vp_emit(vpc, arith(sat, SCA, SIN, dst, mask, none, none, src[0]));
		break;
	case TGSI_OPCODE_SLE:
		nvfx_vp_emit(vpc, arith(sat, VEC, SLE, dst, mask, src[0], src[1], none));
		break;
	case TGSI_OPCODE_SLT:
		nvfx_vp_emit(vpc, arith(sat, VEC, SLT, dst, mask, src[0], src[1], none));
		break;
	case TGSI_OPCODE_SNE:
		nvfx_vp_emit(vpc, arith(sat, VEC, SNE, dst, mask, src[0], src[1], none));
		break;
	case TGSI_OPCODE_SSG:
		nvfx_vp_emit(vpc, arith(sat, VEC, SSG, dst, mask, src[0], src[1], none));
		break;
	case TGSI_OPCODE_STR:
		nvfx_vp_emit(vpc, arith(sat, VEC, STR, dst, mask, src[0], src[1], none));
		break;
	case TGSI_OPCODE_SUB:
		nvfx_vp_emit(vpc, arith(sat, VEC, ADD, dst, mask, src[0], none, neg(src[1])));
		break;
	case TGSI_OPCODE_TRUNC:
		tmp = nvfx_src(temp(vpc));
		insn = arith(0, VEC, MOV, none.reg, mask, src[0], none, none);
		insn.cc_update = 1;
		nvfx_vp_emit(vpc, insn);

		nvfx_vp_emit(vpc, arith(0, VEC, FLR, tmp.reg, mask, abs(src[0]), none, none));
		nvfx_vp_emit(vpc, arith(sat, VEC, MOV, dst, mask, tmp, none, none));

		insn = arith(sat, VEC, MOV, dst, mask, neg(tmp), none, none);
		insn.cc_test = NVFX_COND_LT;
		nvfx_vp_emit(vpc, insn);
		break;
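
	/* TRUNC: floor(|x|) gives the magnitude; the first MOV primed the
	 * condition register with the sign of src0, so the final predicated
	 * MOV re-negates the result only in lanes where src0 was negative.
	 */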
	case TGSI_OPCODE_XPD:
		tmp = nvfx_src(temp(vpc));
		nvfx_vp_emit(vpc, arith(0, VEC, MUL, tmp.reg, mask, swz(src[0], Z, X, Y, Y), swz(src[1], Y, Z, X, X), none));
		nvfx_vp_emit(vpc, arith(sat, VEC, MAD, dst, (mask & ~NVFX_VP_MASK_W), swz(src[0], Y, Z, X, X), swz(src[1], Z, X, Y, Y), neg(tmp)));
		break;
	case TGSI_OPCODE_IF:
		insn = arith(0, VEC, MOV, none.reg, NVFX_VP_MASK_X, src[0], none, none);
		insn.cc_update = 1;
		nvfx_vp_emit(vpc, insn);

		reloc.location = vpc->vp->nr_insns;
		reloc.target = finst->Label.Label + 1;
		util_dynarray_append(&vpc->label_relocs, struct nvfx_relocation, reloc);

		insn = arith(0, SCA, BRA, none.reg, 0, none, none, none);
		insn.cc_test = NVFX_COND_EQ;
		insn.cc_swz[0] = insn.cc_swz[1] = insn.cc_swz[2] = insn.cc_swz[3] = 0;
		nvfx_vp_emit(vpc, insn);
		break;
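
	/* Branch targets are recorded as TGSI instruction indices in
	 * label_relocs; nvfx_vertprog_translate() later rewrites them into
	 * hardware instruction addresses once the final layout is known.
	 */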
	case TGSI_OPCODE_ELSE:
	case TGSI_OPCODE_BRA:
	case TGSI_OPCODE_CAL:
		reloc.location = vpc->vp->nr_insns;
		reloc.target = finst->Label.Label;
		util_dynarray_append(&vpc->label_relocs, struct nvfx_relocation, reloc);

		if(finst->Instruction.Opcode == TGSI_OPCODE_CAL)
			insn = arith(0, SCA, CAL, none.reg, 0, none, none, none);
		else
			insn = arith(0, SCA, BRA, none.reg, 0, none, none, none);
		nvfx_vp_emit(vpc, insn);
		break;
	case TGSI_OPCODE_RET:
		if(sub_depth || !nvfx->use_vp_clipping) {
			tmp = none;
			tmp.swz[0] = tmp.swz[1] = tmp.swz[2] = tmp.swz[3] = 0;
			nvfx_vp_emit(vpc, arith(0, SCA, RET, none.reg, 0, none, none, tmp));
		} else {
			reloc.location = vpc->vp->nr_insns;
			reloc.target = vpc->info->num_instructions;
			util_dynarray_append(&vpc->label_relocs, struct nvfx_relocation, reloc);
			nvfx_vp_emit(vpc, arith(0, SCA, BRA, none.reg, 0, none, none, none));
		}
		break;
	case TGSI_OPCODE_BGNSUB:
		++sub_depth;
		break;
	case TGSI_OPCODE_ENDSUB:
		--sub_depth;
		break;
	case TGSI_OPCODE_ENDIF:
		/* nothing to do here */
		break;
	case TGSI_OPCODE_BGNLOOP:
		loop.cont_target = idx;
		loop.brk_target = finst->Label.Label + 1;
		util_dynarray_append(&vpc->loop_stack, struct nvfx_loop_entry, loop);
		break;
	case TGSI_OPCODE_ENDLOOP:
		loop = util_dynarray_pop(&vpc->loop_stack, struct nvfx_loop_entry);

		reloc.location = vpc->vp->nr_insns;
		reloc.target = loop.cont_target;
		util_dynarray_append(&vpc->label_relocs, struct nvfx_relocation, reloc);

		nvfx_vp_emit(vpc, arith(0, SCA, BRA, none.reg, 0, none, none, none));
		break;
	case TGSI_OPCODE_CONT:
		loop = util_dynarray_top(&vpc->loop_stack, struct nvfx_loop_entry);

		reloc.location = vpc->vp->nr_insns;
		reloc.target = loop.cont_target;
		util_dynarray_append(&vpc->label_relocs, struct nvfx_relocation, reloc);

		nvfx_vp_emit(vpc, arith(0, SCA, BRA, none.reg, 0, none, none, none));
		break;
	case TGSI_OPCODE_BRK:
		loop = util_dynarray_top(&vpc->loop_stack, struct nvfx_loop_entry);

		reloc.location = vpc->vp->nr_insns;
		reloc.target = loop.brk_target;
		util_dynarray_append(&vpc->label_relocs, struct nvfx_relocation, reloc);

		nvfx_vp_emit(vpc, arith(0, SCA, BRA, none.reg, 0, none, none, none));
		break;
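
	/* Loops are lowered to plain branches: BGNLOOP only pushes the
	 * continue/break targets, and ENDLOOP/CONT/BRK each emit an
	 * unconditional BRA back to, or out of, the recorded labels.
	 */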
	case TGSI_OPCODE_END:
		if(nvfx->use_vp_clipping) {
			if(idx != (vpc->info->num_instructions - 1)) {
				reloc.location = vpc->vp->nr_insns;
				reloc.target = vpc->info->num_instructions;
				util_dynarray_append(&vpc->label_relocs, struct nvfx_relocation, reloc);
				nvfx_vp_emit(vpc, arith(0, SCA, BRA, none.reg, 0, none, none, none));
			}
		} else {
			if(vpc->vp->nr_insns)
				vpc->vp->insns[vpc->vp->nr_insns - 1].data[3] |= NVFX_VP_INST_LAST;
			nvfx_vp_emit(vpc, arith(0, VEC, NOP, none.reg, 0, none, none, none));
			vpc->vp->insns[vpc->vp->nr_insns - 1].data[3] |= NVFX_VP_INST_LAST;
		}
		break;
	default:
		NOUVEAU_ERR("invalid opcode %d\n", finst->Instruction.Opcode);
		return FALSE;
	}
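
	/* nv3x has no per-instruction saturate modifier, so saturated results
	 * were routed to a temp above and are clamped here with MAX/MIN
	 * against a shared (0, 1) constant before landing in the real
	 * destination.
	 */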
	if(finst->Instruction.Saturate == TGSI_SAT_ZERO_ONE && !nvfx->use_nv4x)
	{
		if(!vpc->r_0_1.type)
			vpc->r_0_1 = constant(vpc, -1, 0, 1, 0, 0);
		nvfx_vp_emit(vpc, arith(0, VEC, MAX, dst, mask, nvfx_src(dst), swz(nvfx_src(vpc->r_0_1), X, X, X, X), none));
		nvfx_vp_emit(vpc, arith(0, VEC, MIN, final_dst, mask, nvfx_src(dst), swz(nvfx_src(vpc->r_0_1), Y, Y, Y, Y), none));
	}

	release_temps(vpc);
	return TRUE;
}

static boolean
nvfx_vertprog_parse_decl_output(struct nvfx_context* nvfx, struct nvfx_vpc *vpc,
				const struct tgsi_full_declaration *fdec)
{
	unsigned idx = fdec->Range.First;
	int hw;

	switch (fdec->Semantic.Name) {
	case TGSI_SEMANTIC_POSITION:
		hw = NVFX_VP(INST_DEST_POS);
		vpc->hpos_idx = idx;
		break;
	case TGSI_SEMANTIC_COLOR:
		if (fdec->Semantic.Index == 0) {
			hw = NVFX_VP(INST_DEST_COL0);
		} else
		if (fdec->Semantic.Index == 1) {
			hw = NVFX_VP(INST_DEST_COL1);
		} else {
			NOUVEAU_ERR("bad colour semantic index\n");
			return FALSE;
		}
		break;
	case TGSI_SEMANTIC_BCOLOR:
		if (fdec->Semantic.Index == 0) {
			hw = NVFX_VP(INST_DEST_BFC0);
		} else
		if (fdec->Semantic.Index == 1) {
			hw = NVFX_VP(INST_DEST_BFC1);
		} else {
			NOUVEAU_ERR("bad bcolour semantic index\n");
			return FALSE;
		}
		break;
	case TGSI_SEMANTIC_FOG:
		hw = NVFX_VP(INST_DEST_FOGC);
		break;
	case TGSI_SEMANTIC_PSIZE:
		hw = NVFX_VP(INST_DEST_PSZ);
		break;
	case TGSI_SEMANTIC_GENERIC:
		hw = (vpc->vp->generic_to_fp_input[fdec->Semantic.Index] & 0xf) - NVFX_FP_OP_INPUT_SRC_TC(0);
		if(hw <= 8)
			hw = NVFX_VP(INST_DEST_TC(hw));
		else if(hw == 9) /* TODO: this is correct, but how does this overlapping work exactly? */
			hw = NV40_VP_INST_DEST_PSZ;
		else
			assert(0);
		break;
	case TGSI_SEMANTIC_EDGEFLAG:
		/* not really an error just a fallback */
		NOUVEAU_ERR("cannot handle edgeflag output\n");
		return FALSE;
	default:
		NOUVEAU_ERR("bad output semantic\n");
		return FALSE;
	}

	vpc->r_result[idx] = nvfx_reg(NVFXSR_OUTPUT, hw);
	return TRUE;
}
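
/* GENERIC varyings have no fixed hardware slot: nvfx_vertprog_prepare() below
 * packs the semantic indices used by the program into the available texcoord
 * outputs (10 on nv4x, 8 on nv3x) and records the mapping in
 * generic_to_fp_input, so the fragment program side can read matching slots.
 */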
static boolean
nvfx_vertprog_prepare(struct nvfx_context* nvfx, struct nvfx_vpc *vpc)
{
	struct tgsi_parse_context p;
	int high_const = -1, high_temp = -1, high_addr = -1, nr_imm = 0, i;
	struct util_semantic_set set;
	unsigned char sem_layout[10];
	unsigned num_outputs;
	unsigned num_texcoords = nvfx->is_nv4x ? 10 : 8;

	num_outputs = util_semantic_set_from_program_file(&set, vpc->pipe.tokens, TGSI_FILE_OUTPUT);

	if(num_outputs > num_texcoords) {
		NOUVEAU_ERR("too many vertex program outputs: %i\n", num_outputs);
		return FALSE;
	}
	util_semantic_layout_from_set(sem_layout, &set, num_texcoords, num_texcoords);

	/* hope 0xf is (0, 0, 0, 1) initialized; otherwise, we are _probably_ not required to do this */
	memset(vpc->vp->generic_to_fp_input, 0x0f, sizeof(vpc->vp->generic_to_fp_input));
	for(int i = 0; i < num_texcoords; ++i) {
		if(sem_layout[i] == 0xff)
			continue;
		//printf("vp: GENERIC[%i] to fpreg %i\n", sem_layout[i], NVFX_FP_OP_INPUT_SRC_TC(0) + i);
		vpc->vp->generic_to_fp_input[sem_layout[i]] = 0xf0 | NVFX_FP_OP_INPUT_SRC_TC(i);
	}

	vpc->vp->sprite_fp_input = -1;
	for(int i = 0; i < num_texcoords; ++i)
	{
		if(sem_layout[i] == 0xff)
		{
			vpc->vp->sprite_fp_input = NVFX_FP_OP_INPUT_SRC_TC(i);
			break;
		}
	}

	tgsi_parse_init(&p, vpc->pipe.tokens);
	while (!tgsi_parse_end_of_tokens(&p)) {
		const union tgsi_full_token *tok = &p.FullToken;

		tgsi_parse_token(&p);
		switch(tok->Token.Type) {
		case TGSI_TOKEN_TYPE_IMMEDIATE:
			nr_imm++;
			break;
		case TGSI_TOKEN_TYPE_DECLARATION:
		{
			const struct tgsi_full_declaration *fdec;

			fdec = &p.FullToken.FullDeclaration;
			switch (fdec->Declaration.File) {
			case TGSI_FILE_TEMPORARY:
				if (fdec->Range.Last > high_temp) {
					high_temp = fdec->Range.Last;
				}
				break;
			case TGSI_FILE_ADDRESS:
				if (fdec->Range.Last > high_addr) {
					high_addr = fdec->Range.Last;
				}
				break;
			case TGSI_FILE_CONSTANT:
				if (fdec->Range.Last > high_const) {
					high_const = fdec->Range.Last;
				}
				break;
			case TGSI_FILE_OUTPUT:
				if (!nvfx_vertprog_parse_decl_output(nvfx, vpc, fdec))
					return FALSE;
				break;
			default:
				break;
			}
		}
			break;
		default:
			break;
		}
	}
	tgsi_parse_free(&p);

	if (nr_imm) {
		vpc->imm = CALLOC(nr_imm, sizeof(struct nvfx_reg));
		assert(vpc->imm);
	}

	if (++high_temp) {
		vpc->r_temp = CALLOC(high_temp, sizeof(struct nvfx_reg));
		for (i = 0; i < high_temp; i++)
			vpc->r_temp[i] = temp(vpc);
	}

	if (++high_addr) {
		vpc->r_address = CALLOC(high_addr, sizeof(struct nvfx_reg));
		for (i = 0; i < high_addr; i++)
			vpc->r_address[i] = nvfx_reg(NVFXSR_TEMP, i);
	}

	if (++high_const) {
		vpc->r_const = CALLOC(high_const, sizeof(struct nvfx_reg));
		for (i = 0; i < high_const; i++)
			vpc->r_const[i] = constant(vpc, i, 0, 0, 0, 0);
	}

	vpc->r_temps_discard = 0;
	return TRUE;
}

DEBUG_GET_ONCE_BOOL_OPTION(nvfx_dump_vp, "NVFX_DUMP_VP", FALSE)
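
/* With NVFX_DUMP_VP=1 in the environment, the translator below prints both the
 * input TGSI and the resulting hardware instruction words.
 */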
static struct nvfx_vertex_program*
nvfx_vertprog_translate(struct nvfx_context *nvfx, const struct pipe_shader_state* vps, struct tgsi_shader_info* info)
{
	struct tgsi_parse_context parse;
	struct nvfx_vertex_program* vp = NULL;
	struct nvfx_vpc *vpc = NULL;
	struct nvfx_src none = nvfx_src(nvfx_reg(NVFXSR_NONE, 0));
	struct util_dynarray insns;
	int i;

	tgsi_parse_init(&parse, vps->tokens);

	vp = CALLOC_STRUCT(nvfx_vertex_program);
	if (!vp)
		goto out_err;

	vpc = CALLOC_STRUCT(nvfx_vpc);
	if (!vpc)
		goto out_err;

	vpc->nvfx = nvfx;
	vpc->vp = vp;
	vpc->pipe = *vps;
	vpc->info = info;

	{
		// TODO: use a 64-bit atomic here!
		static unsigned long long id = 0;
		vp->id = ++id;
	}

	/* reserve space for ucps */
	if(nvfx->use_vp_clipping)
	{
		for(i = 0; i < 6; ++i)
			constant(vpc, -1, 0, 0, 0, 0);
	}

	if (!nvfx_vertprog_prepare(nvfx, vpc)) {
		FREE(vpc);
		return NULL;
	}

	/* Redirect post-transform vertex position to a temp if user clip
	 * planes are enabled. We need to append code to the vtxprog
	 * to handle clip planes later.
	 */
	/* TODO: maybe support patching this depending on whether there are ucps: not sure if it really matters much */
	if (nvfx->use_vp_clipping) {
		vpc->r_result[vpc->hpos_idx] = temp(vpc);
		vpc->r_temps_discard = 0;
	}

	util_dynarray_init(&insns);
	while (!tgsi_parse_end_of_tokens(&parse)) {
		tgsi_parse_token(&parse);

		switch (parse.FullToken.Token.Type) {
		case TGSI_TOKEN_TYPE_IMMEDIATE:
		{
			const struct tgsi_full_immediate *imm;

			imm = &parse.FullToken.FullImmediate;
			assert(imm->Immediate.DataType == TGSI_IMM_FLOAT32);
			assert(imm->Immediate.NrTokens == 4 + 1);
			vpc->imm[vpc->nr_imm++] =
				constant(vpc, -1,
					 imm->u[0].Float,
					 imm->u[1].Float,
					 imm->u[2].Float,
					 imm->u[3].Float);
		}
			break;
		case TGSI_TOKEN_TYPE_INSTRUCTION:
		{
			const struct tgsi_full_instruction *finst;
			unsigned idx = insns.size >> 2;
			util_dynarray_append(&insns, unsigned, vp->nr_insns);
			finst = &parse.FullToken.FullInstruction;
			if (!nvfx_vertprog_parse_instruction(nvfx, vpc, idx, finst))
				goto out_err;
		}
			break;
		default:
			break;
		}
	}

	util_dynarray_append(&insns, unsigned, vp->nr_insns);

	for(unsigned i = 0; i < vpc->label_relocs.size; i += sizeof(struct nvfx_relocation))
	{
		struct nvfx_relocation* label_reloc = (struct nvfx_relocation*)((char*)vpc->label_relocs.data + i);
		struct nvfx_relocation hw_reloc;

		hw_reloc.location = label_reloc->location;
		hw_reloc.target = ((unsigned*)insns.data)[label_reloc->target];

		//debug_printf("hw %u -> tgsi %u = hw %u\n", hw_reloc.location, label_reloc->target, hw_reloc.target);

		util_dynarray_append(&vp->branch_relocs, struct nvfx_relocation, hw_reloc);
	}
	util_dynarray_fini(&insns);
	util_dynarray_trim(&vp->branch_relocs);

	/* XXX: what if we add a RET before?! make sure we jump here...*/

	/* Write out HPOS if it was redirected to a temp earlier */
	if (vpc->r_result[vpc->hpos_idx].type != NVFXSR_OUTPUT) {
		struct nvfx_reg hpos = nvfx_reg(NVFXSR_OUTPUT,
						NVFX_VP(INST_DEST_POS));
		struct nvfx_src htmp = nvfx_src(vpc->r_result[vpc->hpos_idx]);

		nvfx_vp_emit(vpc, arith(0, VEC, MOV, hpos, NVFX_VP_MASK_ALL, htmp, none, none));
	}

	/* Insert code to handle user clip planes */
	if(nvfx->use_vp_clipping)
	{
		for (i = 0; i < 6; i++) {
			struct nvfx_reg cdst = nvfx_reg(NVFXSR_OUTPUT, NV30_VP_INST_DEST_CLP(i));
			struct nvfx_src ceqn = nvfx_src(nvfx_reg(NVFXSR_CONST, i));
			struct nvfx_src htmp = nvfx_src(vpc->r_result[vpc->hpos_idx]);
			unsigned mask;

			if(nvfx->is_nv4x)
			{
				switch (i) {
				case 0: case 3: mask = NVFX_VP_MASK_Y; break;
				case 1: case 4: mask = NVFX_VP_MASK_Z; break;
				case 2: case 5: mask = NVFX_VP_MASK_W; break;
				default:
					NOUVEAU_ERR("invalid clip dist #%d\n", i);
					goto out_err;
				}
			}
			else
				mask = NVFX_VP_MASK_X;

			nvfx_vp_emit(vpc, arith(0, VEC, DP4, cdst, mask, htmp, ceqn, none));
		}
	}

	if(debug_get_option_nvfx_dump_vp())
	{
		debug_printf("\n");
		tgsi_dump(vpc->pipe.tokens, 0);

		debug_printf("\n%s vertex program:\n", nvfx->is_nv4x ? "nv4x" : "nv3x");
		for (i = 0; i < vp->nr_insns; i++)
			debug_printf("%3u: %08x %08x %08x %08x\n", i, vp->insns[i].data[0], vp->insns[i].data[1], vp->insns[i].data[2], vp->insns[i].data[3]);
		debug_printf("\n");
	}

	vp->exec_start = -1;

out:
	tgsi_parse_free(&parse);
	if(vpc) {
		util_dynarray_fini(&vpc->label_relocs);
		util_dynarray_fini(&vpc->loop_stack);
		FREE(vpc->r_temp);
		FREE(vpc->r_address);
		FREE(vpc->r_const);
		FREE(vpc->imm);
		FREE(vpc);
	}
	return vp;

out_err:
	FREE(vp);
	vp = NULL;
	goto out;
}
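
/* For the swtnl (draw module) fallback the hardware program only needs to pass
 * vertex data through: one MOV per output is generated with ureg and fed back
 * into the regular translator above.
 */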
static struct nvfx_vertex_program*
nvfx_vertprog_translate_draw_vp(struct nvfx_context *nvfx, struct nvfx_pipe_vertex_program* pvp)
{
	struct nvfx_vertex_program* vp = NULL;
	struct pipe_shader_state vps;
	struct tgsi_shader_info info;
	struct ureg_program *ureg = NULL;
	unsigned num_outputs = MIN2(pvp->info.num_outputs, 16);

	ureg = ureg_create(TGSI_PROCESSOR_VERTEX);
	if (ureg == NULL)
		return NULL;

	for (unsigned i = 0; i < num_outputs; i++)
		ureg_MOV(ureg, ureg_DECL_output(ureg, pvp->info.output_semantic_name[i], pvp->info.output_semantic_index[i]), ureg_DECL_vs_input(ureg, i));

	ureg_END(ureg);

	vps.tokens = ureg_get_tokens(ureg, 0);
	tgsi_scan_shader(vps.tokens, &info);
	vp = nvfx_vertprog_translate(nvfx, &vps, &info);
	ureg_free_tokens(vps.tokens);
	ureg_destroy(ureg);

	return vp;
}

boolean
nvfx_vertprog_validate(struct nvfx_context *nvfx)
{
	struct nvfx_screen *screen = nvfx->screen;
	struct nouveau_channel *chan = screen->base.channel;
	struct nouveau_grobj *eng3d = screen->eng3d;
	struct nvfx_pipe_vertex_program *pvp = nvfx->vertprog;
	struct nvfx_vertex_program* vp;
	struct pipe_resource *constbuf;
	boolean upload_code = FALSE, upload_data = FALSE;
	int i;

	if (nvfx->render_mode == HW) {
		nvfx->fallback_swtnl &= ~NVFX_NEW_VERTPROG;
		vp = pvp->vp;

		if(!vp) {
			vp = nvfx_vertprog_translate(nvfx, &pvp->pipe, &pvp->info);
			if(!vp)
				vp = NVFX_VP_FAILED;
			pvp->vp = vp;
		}

		if(vp == NVFX_VP_FAILED) {
			nvfx->fallback_swtnl |= NVFX_NEW_VERTPROG;
			return FALSE;
		}

		constbuf = nvfx->constbuf[PIPE_SHADER_VERTEX];
	} else {
		vp = pvp->draw_vp;
		if(!vp)
		{
			pvp->draw_vp = vp = nvfx_vertprog_translate_draw_vp(nvfx, pvp);
			if(!vp) {
				_debug_printf("Error: unable to create a swtnl passthrough vertex shader: aborting.");
				abort();
			}
		}
		constbuf = NULL;
	}

	nvfx->hw_vertprog = vp;
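
	/* Exec (instruction) and data (constant) slots come from small
	 * screen-wide heaps; the two allocation blocks below evict other
	 * programs on contention. An evicted program simply re-allocates and
	 * re-uploads the next time it is validated, since freeing clears its
	 * exec/data allocation.
	 */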
	/* Allocate hw vtxprog exec slots */
	if (!vp->exec) {
		struct nouveau_resource *heap = nvfx->screen->vp_exec_heap;
		uint vplen = vp->nr_insns;

		if (nouveau_resource_alloc(heap, vplen, vp, &vp->exec)) {
			while (heap->next && heap->size < vplen) {
				struct nvfx_vertex_program *evict;

				evict = heap->next->priv;
				nouveau_resource_free(&evict->exec);
			}

			if (nouveau_resource_alloc(heap, vplen, vp, &vp->exec)) {
				debug_printf("Vertex shader too long: %u instructions\n", vplen);
				nvfx->fallback_swtnl |= NVFX_NEW_VERTPROG;
				return FALSE;
			}
		}

		upload_code = TRUE;
	}

	/* Allocate hw vtxprog const slots */
	if (vp->nr_consts && !vp->data) {
		struct nouveau_resource *heap = nvfx->screen->vp_data_heap;

		if (nouveau_resource_alloc(heap, vp->nr_consts, vp, &vp->data)) {
			while (heap->next && heap->size < vp->nr_consts) {
				struct nvfx_vertex_program *evict;

				evict = heap->next->priv;
				nouveau_resource_free(&evict->data);
			}

			if (nouveau_resource_alloc(heap, vp->nr_consts, vp, &vp->data)) {
				debug_printf("Vertex shader uses too many constants: %u constants\n", vp->nr_consts);
				nvfx->fallback_swtnl |= NVFX_NEW_VERTPROG;
				return FALSE;
			}
		}

		//printf("start at %u nc %u\n", vp->data->start, vp->nr_consts);

		/*XXX: handle this some day */
		assert(vp->data->start >= vp->data_start_min);

		upload_data = TRUE;
		if (vp->data_start != vp->data->start)
			upload_code = TRUE;
	}

	/* If exec or data segments moved we need to patch the program to
	 * fixup offsets and register IDs.
	 */
	if (vp->exec_start != vp->exec->start) {
		//printf("vp_relocs %u -> %u\n", vp->exec_start, vp->exec->start);
		for(unsigned i = 0; i < vp->branch_relocs.size; i += sizeof(struct nvfx_relocation))
		{
			struct nvfx_relocation* reloc = (struct nvfx_relocation*)((char*)vp->branch_relocs.data + i);
			uint32_t* hw = vp->insns[reloc->location].data;
			unsigned target = vp->exec->start + reloc->target;

			//debug_printf("vp_reloc hw %u -> hw %u\n", reloc->location, target);

			if(!nvfx->is_nv4x)
			{
				hw[2] &=~ NV30_VP_INST_IADDR_MASK;
				hw[2] |= (target & 0x1ff) << NV30_VP_INST_IADDR_SHIFT;
			}
			else
			{
				hw[3] &=~ NV40_VP_INST_IADDRL_MASK;
				hw[3] |= (target & 7) << NV40_VP_INST_IADDRL_SHIFT;

				hw[2] &=~ NV40_VP_INST_IADDRH_MASK;
				hw[2] |= ((target >> 3) & 0x3f) << NV40_VP_INST_IADDRH_SHIFT;
			}
		}

		vp->exec_start = vp->exec->start;
	}

	if (vp->data_start != vp->data->start) {
		for(unsigned i = 0; i < vp->const_relocs.size; i += sizeof(struct nvfx_relocation))
		{
			struct nvfx_relocation* reloc = (struct nvfx_relocation*)((char*)vp->const_relocs.data + i);
			struct nvfx_vertex_program_exec *vpi = &vp->insns[reloc->location];

			//printf("reloc %i to %i + %i\n", reloc->location, vp->data->start, reloc->target);

			vpi->data[1] &= ~NVFX_VP(INST_CONST_SRC_MASK);
			vpi->data[1] |=
				(reloc->target + vp->data->start) <<
				NVFX_VP(INST_CONST_SRC_SHIFT);
		}

		vp->data_start = vp->data->start;
		upload_code = TRUE;
	}

	/* Update + Upload constant values */
	if (vp->nr_consts) {
		float *map = NULL;

		if (constbuf)
			map = (float*)nvfx_buffer(constbuf)->data;

		/*
		 * WAIT_RING(chan, 512 * 6);
		 * for (i = 0; i < 512; i++) {
		 *	float v[4] = {0.1, 0,2, 0.3, 0.4};
		 *	OUT_RING(chan, RING_3D(NV30_3D_VP_UPLOAD_CONST_ID, 5));
		 *	OUT_RING(chan, i);
		 *	OUT_RINGp(chan, (uint32_t *)v, 4);
		 *	printf("frob %i\n", i);
		 * }
		 */

		for (i = nvfx->use_vp_clipping ? 6 : 0; i < vp->nr_consts; i++) {
			struct nvfx_vertex_program_data *vpd = &vp->consts[i];

			if (vpd->index >= 0) {
				if (!upload_data &&
				    !memcmp(vpd->value, &map[vpd->index * 4],
					    4 * sizeof(float)))
					continue;
				memcpy(vpd->value, &map[vpd->index * 4],
				       4 * sizeof(float));
			}

			//printf("upload into %i + %i: %f %f %f %f\n", vp->data->start, i, vpd->value[0], vpd->value[1], vpd->value[2], vpd->value[3]);

			BEGIN_RING(chan, eng3d, NV30_3D_VP_UPLOAD_CONST_ID, 5);
			OUT_RING(chan, i + vp->data->start);
			OUT_RINGp(chan, (uint32_t *)vpd->value, 4);
		}
	}
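
	/* When user clip planes are enabled the loop above starts at 6: those
	 * slots were reserved in nvfx_vertprog_translate() for the clip plane
	 * equations and are not sourced from the user constant buffer.
	 */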

	/* Upload vtxprog */
	if (upload_code) {
		BEGIN_RING(chan, eng3d, NV30_3D_VP_UPLOAD_FROM_ID, 1);
		OUT_RING(chan, vp->exec->start);
		for (i = 0; i < vp->nr_insns; i++) {
			BEGIN_RING(chan, eng3d, NV30_3D_VP_UPLOAD_INST(0), 4);
			//printf("%08x %08x %08x %08x\n", vp->insns[i].data[0], vp->insns[i].data[1], vp->insns[i].data[2], vp->insns[i].data[3]);
			OUT_RINGp(chan, vp->insns[i].data, 4);
		}
	}

	if(nvfx->dirty & (NVFX_NEW_VERTPROG))
	{
		BEGIN_RING(chan, eng3d, NV30_3D_VP_START_FROM_ID, 1);
		OUT_RING(chan, vp->exec->start);
		if(nvfx->is_nv4x) {
			BEGIN_RING(chan, eng3d, NV40_3D_VP_ATTRIB_EN, 1);
			OUT_RING(chan, vp->ir);
		}
	}

	return TRUE;
}

void
nvfx_vertprog_destroy(struct nvfx_context *nvfx, struct nvfx_vertex_program *vp)
{
	if (vp->nr_insns)
		FREE(vp->insns);
	if (vp->nr_consts)
		FREE(vp->consts);

	nouveau_resource_free(&vp->exec);
	nouveau_resource_free(&vp->data);

	util_dynarray_fini(&vp->branch_relocs);
	util_dynarray_fini(&vp->const_relocs);
	FREE(vp);
}
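
/* CSO hooks: translation is deferred, so create only duplicates and scans the
 * TGSI; actual compilation happens in nvfx_vertprog_validate(), separately for
 * the hw and swtnl paths.
 */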
static void *
nvfx_vp_state_create(struct pipe_context *pipe, const struct pipe_shader_state *cso)
{
	struct nvfx_pipe_vertex_program *pvp;

	pvp = CALLOC(1, sizeof(struct nvfx_pipe_vertex_program));
	pvp->pipe.tokens = tgsi_dup_tokens(cso->tokens);
	tgsi_scan_shader(pvp->pipe.tokens, &pvp->info);
	pvp->draw_elements = MAX2(1, MIN2(pvp->info.num_outputs, 16));
	pvp->draw_no_elements = pvp->info.num_outputs == 0;

	return (void *)pvp;
}

static void
nvfx_vp_state_bind(struct pipe_context *pipe, void *hwcso)
{
	struct nvfx_context *nvfx = nvfx_context(pipe);

	nvfx->vertprog = hwcso;
	nvfx->dirty |= NVFX_NEW_VERTPROG;
	nvfx->draw_dirty |= NVFX_NEW_VERTPROG;
}

static void
nvfx_vp_state_delete(struct pipe_context *pipe, void *hwcso)
{
	struct nvfx_context *nvfx = nvfx_context(pipe);
	struct nvfx_pipe_vertex_program *pvp = hwcso;

	if (pvp->draw_vs)
		draw_delete_vertex_shader(nvfx->draw, pvp->draw_vs);
	if(pvp->vp && pvp->vp != NVFX_VP_FAILED)
		nvfx_vertprog_destroy(nvfx, pvp->vp);
	if(pvp->draw_vp)
		nvfx_vertprog_destroy(nvfx, pvp->draw_vp);
	FREE((void*)pvp->pipe.tokens);
	FREE(pvp);
}

void
nvfx_init_vertprog_functions(struct nvfx_context *nvfx)
{
	nvfx->pipe.create_vs_state = nvfx_vp_state_create;
	nvfx->pipe.bind_vs_state = nvfx_vp_state_bind;
	nvfx->pipe.delete_vs_state = nvfx_vp_state_delete;
}