#include "pipe/p_context.h"
#include "pipe/p_defines.h"
#include "pipe/p_state.h"
#include "util/u_inlines.h"

#include "pipe/p_shader_tokens.h"
#include "tgsi/tgsi_parse.h"
#include "tgsi/tgsi_dump.h"
#include "tgsi/tgsi_util.h"

#include "nvfx_context.h"
#include "nvfx_state.h"

/* TODO (at least...):
 * 1. Indexed consts  + ARL
 * 3. NV_vp11, NV_vp2, NV_vp3 features
 *       - extra arith opcodes
 */

#include "nv30_vertprog.h"
#include "nv40_vertprog.h"

#define NVFX_VP_INST_DEST_CLIP(n) ((~0 - 6) + (n))

struct nvfx_vpc {
	struct nvfx_vertex_program *vp;

	struct nvfx_vertex_program_exec *vpi;

	unsigned r_temps;
	unsigned r_temps_discard;
	struct nvfx_sreg r_result[PIPE_MAX_SHADER_OUTPUTS];
	struct nvfx_sreg *r_address;
	struct nvfx_sreg *r_temp;

	struct nvfx_sreg *imm;
	unsigned nr_imm;

	unsigned hpos_idx;
};

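/* Temporary register allocation: r_temps is a bitmask of hardware temps in
 * use; r_temps_discard marks the ones that only live for the duration of a
 * single TGSI instruction and are freed again by release_temps(). */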
static struct nvfx_sreg
temp(struct nvfx_vpc *vpc)
{
	int idx = ffs(~vpc->r_temps) - 1;

	if (idx < 0) {
		NOUVEAU_ERR("out of temps!!\n");
		return nvfx_sr(NVFXSR_TEMP, 0);
	}

	vpc->r_temps |= (1 << idx);
	vpc->r_temps_discard |= (1 << idx);
	return nvfx_sr(NVFXSR_TEMP, idx);
}

static void
release_temps(struct nvfx_vpc *vpc)
{
	vpc->r_temps &= ~vpc->r_temps_discard;
	vpc->r_temps_discard = 0;
}

static struct nvfx_sreg
constant(struct nvfx_vpc *vpc, int pipe, float x, float y, float z, float w)
{
	struct nvfx_vertex_program *vp = vpc->vp;
	struct nvfx_vertex_program_data *vpd;
	int idx;

	if (pipe >= 0) {
		for (idx = 0; idx < vp->nr_consts; idx++) {
			if (vp->consts[idx].index == pipe)
				return nvfx_sr(NVFXSR_CONST, idx);
		}
	}

	idx = vp->nr_consts++;
	vp->consts = realloc(vp->consts, sizeof(*vpd) * vp->nr_consts);
	vpd = &vp->consts[idx];
	vpd->index = pipe;
	vpd->value[0] = x;
	vpd->value[1] = y;
	vpd->value[2] = z;
	vpd->value[3] = w;
	return nvfx_sr(NVFXSR_CONST, idx);
}

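/* arith() builds the NVFX_VP_INST_SLOT_* and per-slot opcode token names from
 * the VEC/SCA and mnemonic arguments and forwards to nvfx_vp_arith() below;
 * it relies on an nvfx_context pointer named "nvfx" being in scope. */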
#define arith(cc,s,o,d,m,s0,s1,s2) \
	nvfx_vp_arith(nvfx, (cc), NVFX_VP_INST_SLOT_##s, NVFX_VP_INST_##s##_OP_##o, (d), (m), (s0), (s1), (s2))

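/* Pack one source operand into a 32-bit selector word and scatter it into
 * the instruction dwords according to its position (0, 1 or 2). */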
static void
emit_src(struct nvfx_context* nvfx, struct nvfx_vpc *vpc, uint32_t *hw, int pos, struct nvfx_sreg src)
{
	struct nvfx_vertex_program *vp = vpc->vp;
	uint32_t sr = 0;

	switch (src.type) {
	case NVFXSR_TEMP:
		sr |= (NVFX_VP(SRC_REG_TYPE_TEMP) << NVFX_VP(SRC_REG_TYPE_SHIFT));
		sr |= (src.index << NVFX_VP(SRC_TEMP_SRC_SHIFT));
		break;
	case NVFXSR_INPUT:
		sr |= (NVFX_VP(SRC_REG_TYPE_INPUT) <<
		       NVFX_VP(SRC_REG_TYPE_SHIFT));
		vp->ir |= (1 << src.index);
		hw[1] |= (src.index << NVFX_VP(INST_INPUT_SRC_SHIFT));
		break;
	case NVFXSR_CONST:
		sr |= (NVFX_VP(SRC_REG_TYPE_CONST) <<
		       NVFX_VP(SRC_REG_TYPE_SHIFT));
		assert(vpc->vpi->const_index == -1 ||
		       vpc->vpi->const_index == src.index);
		vpc->vpi->const_index = src.index;
		break;
	case NVFXSR_NONE:
		sr |= (NVFX_VP(SRC_REG_TYPE_INPUT) <<
		       NVFX_VP(SRC_REG_TYPE_SHIFT));
		break;
	default:
		assert(0);
	}

	if (src.negate)
		sr |= NVFX_VP(SRC_NEGATE);

	if (src.abs)
		hw[0] |= (1 << (21 + pos));

	sr |= ((src.swz[0] << NVFX_VP(SRC_SWZ_X_SHIFT)) |
	       (src.swz[1] << NVFX_VP(SRC_SWZ_Y_SHIFT)) |
	       (src.swz[2] << NVFX_VP(SRC_SWZ_Z_SHIFT)) |
	       (src.swz[3] << NVFX_VP(SRC_SWZ_W_SHIFT)));

	switch (pos) {
	case 0:
		hw[1] |= ((sr & NVFX_VP(SRC0_HIGH_MASK)) >>
			  NVFX_VP(SRC0_HIGH_SHIFT)) << NVFX_VP(INST_SRC0H_SHIFT);
		hw[2] |= (sr & NVFX_VP(SRC0_LOW_MASK)) <<
			  NVFX_VP(INST_SRC0L_SHIFT);
		break;
	case 1:
		hw[2] |= sr << NVFX_VP(INST_SRC1_SHIFT);
		break;
	case 2:
		hw[2] |= ((sr & NVFX_VP(SRC2_HIGH_MASK)) >>
			  NVFX_VP(SRC2_HIGH_SHIFT)) << NVFX_VP(INST_SRC2H_SHIFT);
		hw[3] |= (sr & NVFX_VP(SRC2_LOW_MASK)) <<
			  NVFX_VP(INST_SRC2L_SHIFT);
		break;
	default:
		assert(0);
	}
}

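/* Encode the destination register.  Output destinations also update vp->or
 * (the set of hardware result slots written) and vp->clip_ctrl for the fake
 * clip plane outputs. */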
static void
emit_dst(struct nvfx_context* nvfx, struct nvfx_vpc *vpc, uint32_t *hw, int slot, struct nvfx_sreg dst)
{
	struct nvfx_vertex_program *vp = vpc->vp;

	switch (dst.type) {
	case NVFXSR_TEMP:
		if(!nvfx->is_nv4x)
			hw[0] |= (dst.index << NV30_VP_INST_DEST_TEMP_ID_SHIFT);
		else {
			hw[3] |= NV40_VP_INST_DEST_MASK;
			if (slot == 0) {
				hw[0] |= (dst.index <<
					  NV40_VP_INST_VEC_DEST_TEMP_SHIFT);
			} else {
				hw[3] |= (dst.index <<
					  NV40_VP_INST_SCA_DEST_TEMP_SHIFT);
			}
		}
		break;
	case NVFXSR_OUTPUT:
		/* TODO: this may be wrong because on nv30 COL0 and BFC0 are swapped */
		switch (dst.index) {
		case NVFX_VP_INST_DEST_CLIP(0):
			vp->clip_ctrl |= NV34TCL_VP_CLIP_PLANES_ENABLE_PLANE0;
			dst.index = NVFX_VP(INST_DEST_FOGC);
			break;
		case NVFX_VP_INST_DEST_CLIP(1):
			vp->clip_ctrl |= NV34TCL_VP_CLIP_PLANES_ENABLE_PLANE1;
			dst.index = NVFX_VP(INST_DEST_FOGC);
			break;
		case NVFX_VP_INST_DEST_CLIP(2):
			vp->clip_ctrl |= NV34TCL_VP_CLIP_PLANES_ENABLE_PLANE2;
			dst.index = NVFX_VP(INST_DEST_FOGC);
			break;
		case NVFX_VP_INST_DEST_CLIP(3):
			vp->clip_ctrl |= NV34TCL_VP_CLIP_PLANES_ENABLE_PLANE3;
			dst.index = NVFX_VP(INST_DEST_PSZ);
			break;
		case NVFX_VP_INST_DEST_CLIP(4):
			vp->clip_ctrl |= NV34TCL_VP_CLIP_PLANES_ENABLE_PLANE4;
			dst.index = NVFX_VP(INST_DEST_PSZ);
			break;
		case NVFX_VP_INST_DEST_CLIP(5):
			vp->clip_ctrl |= NV34TCL_VP_CLIP_PLANES_ENABLE_PLANE5;
			dst.index = NVFX_VP(INST_DEST_PSZ);
			break;
		default:
			if(!nvfx->is_nv4x) {
				switch (dst.index) {
				case NV30_VP_INST_DEST_COL0 : vp->or |= (1 << 0); break;
				case NV30_VP_INST_DEST_COL1 : vp->or |= (1 << 1); break;
				case NV30_VP_INST_DEST_BFC0 : vp->or |= (1 << 2); break;
				case NV30_VP_INST_DEST_BFC1 : vp->or |= (1 << 3); break;
				case NV30_VP_INST_DEST_FOGC: vp->or |= (1 << 4); break;
				case NV30_VP_INST_DEST_PSZ : vp->or |= (1 << 5); break;
				case NV30_VP_INST_DEST_TC(0): vp->or |= (1 << 14); break;
				case NV30_VP_INST_DEST_TC(1): vp->or |= (1 << 15); break;
				case NV30_VP_INST_DEST_TC(2): vp->or |= (1 << 16); break;
				case NV30_VP_INST_DEST_TC(3): vp->or |= (1 << 17); break;
				case NV30_VP_INST_DEST_TC(4): vp->or |= (1 << 18); break;
				case NV30_VP_INST_DEST_TC(5): vp->or |= (1 << 19); break;
				case NV30_VP_INST_DEST_TC(6): vp->or |= (1 << 20); break;
				case NV30_VP_INST_DEST_TC(7): vp->or |= (1 << 21); break;
				}
			} else {
				switch (dst.index) {
				case NV40_VP_INST_DEST_COL0 : vp->or |= (1 << 0); break;
				case NV40_VP_INST_DEST_COL1 : vp->or |= (1 << 1); break;
				case NV40_VP_INST_DEST_BFC0 : vp->or |= (1 << 2); break;
				case NV40_VP_INST_DEST_BFC1 : vp->or |= (1 << 3); break;
				case NV40_VP_INST_DEST_FOGC: vp->or |= (1 << 4); break;
				case NV40_VP_INST_DEST_PSZ : vp->or |= (1 << 5); break;
				case NV40_VP_INST_DEST_TC(0): vp->or |= (1 << 14); break;
				case NV40_VP_INST_DEST_TC(1): vp->or |= (1 << 15); break;
				case NV40_VP_INST_DEST_TC(2): vp->or |= (1 << 16); break;
				case NV40_VP_INST_DEST_TC(3): vp->or |= (1 << 17); break;
				case NV40_VP_INST_DEST_TC(4): vp->or |= (1 << 18); break;
				case NV40_VP_INST_DEST_TC(5): vp->or |= (1 << 19); break;
				case NV40_VP_INST_DEST_TC(6): vp->or |= (1 << 20); break;
				case NV40_VP_INST_DEST_TC(7): vp->or |= (1 << 21); break;
				}
			}
			break;
		}

		if(!nvfx->is_nv4x) {
			hw[3] |= (dst.index << NV30_VP_INST_DEST_SHIFT);
			hw[0] |= NV30_VP_INST_VEC_DEST_TEMP_MASK | (1<<20);

			/*XXX: no way this is entirely correct, someone needs to
			 *     figure out what exactly it is.
			 */
		} else {
			hw[3] |= (dst.index << NV40_VP_INST_DEST_SHIFT);
			if (slot == 0) {
				hw[0] |= NV40_VP_INST_VEC_RESULT;
				hw[0] |= NV40_VP_INST_VEC_DEST_TEMP_MASK | (1<<20);
			} else {
				hw[3] |= NV40_VP_INST_SCA_RESULT;
				hw[3] |= NV40_VP_INST_SCA_DEST_TEMP_MASK;
			}
		}
		break;
	default:
		assert(0);
	}
}

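/* Append one 4-dword instruction to the program, fill in the (always-true)
 * condition test, opcode and write mask, then emit destination and sources. */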
static void
nvfx_vp_arith(struct nvfx_context* nvfx, struct nvfx_vpc *vpc, int slot, int op,
	      struct nvfx_sreg dst, int mask,
	      struct nvfx_sreg s0, struct nvfx_sreg s1,
	      struct nvfx_sreg s2)
{
	struct nvfx_vertex_program *vp = vpc->vp;
	uint32_t *hw;

	vp->insns = realloc(vp->insns, ++vp->nr_insns * sizeof(*vpc->vpi));
	vpc->vpi = &vp->insns[vp->nr_insns - 1];
	memset(vpc->vpi, 0, sizeof(*vpc->vpi));
	vpc->vpi->const_index = -1;

	hw = vpc->vpi->data;

	hw[0] |= (NVFX_COND_TR << NVFX_VP(INST_COND_SHIFT));
	hw[0] |= ((0 << NVFX_VP(INST_COND_SWZ_X_SHIFT)) |
		  (1 << NVFX_VP(INST_COND_SWZ_Y_SHIFT)) |
		  (2 << NVFX_VP(INST_COND_SWZ_Z_SHIFT)) |
		  (3 << NVFX_VP(INST_COND_SWZ_W_SHIFT)));

	if(!nvfx->is_nv4x) {
		hw[1] |= (op << NV30_VP_INST_VEC_OPCODE_SHIFT);
//		hw[3] |= NVFX_VP(INST_SCA_DEST_TEMP_MASK);
//		hw[3] |= (mask << NVFX_VP(INST_VEC_WRITEMASK_SHIFT));

		if (dst.type == NVFXSR_OUTPUT) {
			if (slot)
				hw[3] |= (mask << NV30_VP_INST_SDEST_WRITEMASK_SHIFT);
			else
				hw[3] |= (mask << NV30_VP_INST_VDEST_WRITEMASK_SHIFT);
		} else {
			if (slot)
				hw[3] |= (mask << NV30_VP_INST_STEMP_WRITEMASK_SHIFT);
			else
				hw[3] |= (mask << NV30_VP_INST_VTEMP_WRITEMASK_SHIFT);
		}
	} else {
		if (slot == 0) {
			hw[1] |= (op << NV40_VP_INST_VEC_OPCODE_SHIFT);
			hw[3] |= NV40_VP_INST_SCA_DEST_TEMP_MASK;
			hw[3] |= (mask << NV40_VP_INST_VEC_WRITEMASK_SHIFT);
		} else {
			hw[1] |= (op << NV40_VP_INST_SCA_OPCODE_SHIFT);
			hw[0] |= (NV40_VP_INST_VEC_DEST_TEMP_MASK | (1 << 20));
			hw[3] |= (mask << NV40_VP_INST_SCA_WRITEMASK_SHIFT);
		}
	}

	emit_dst(nvfx, vpc, hw, slot, dst);
	emit_src(nvfx, vpc, hw, 0, s0);
	emit_src(nvfx, vpc, hw, 1, s1);
	emit_src(nvfx, vpc, hw, 2, s2);
}

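/* Helpers translating TGSI register references and write masks into the
 * nvfx_sreg representation used by the emitters above. */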
static INLINE struct nvfx_sreg
tgsi_src(struct nvfx_vpc *vpc, const struct tgsi_full_src_register *fsrc) {
	struct nvfx_sreg src;

	switch (fsrc->Register.File) {
	case TGSI_FILE_INPUT:
		src = nvfx_sr(NVFXSR_INPUT, fsrc->Register.Index);
		break;
	case TGSI_FILE_CONSTANT:
		src = constant(vpc, fsrc->Register.Index, 0, 0, 0, 0);
		break;
	case TGSI_FILE_IMMEDIATE:
		src = vpc->imm[fsrc->Register.Index];
		break;
	case TGSI_FILE_TEMPORARY:
		src = vpc->r_temp[fsrc->Register.Index];
		break;
	default:
		NOUVEAU_ERR("bad src file\n");
		break;
	}

	src.abs = fsrc->Register.Absolute;
	src.negate = fsrc->Register.Negate;
	src.swz[0] = fsrc->Register.SwizzleX;
	src.swz[1] = fsrc->Register.SwizzleY;
	src.swz[2] = fsrc->Register.SwizzleZ;
	src.swz[3] = fsrc->Register.SwizzleW;
	return src;
}

static INLINE struct nvfx_sreg
tgsi_dst(struct nvfx_vpc *vpc, const struct tgsi_full_dst_register *fdst) {
	struct nvfx_sreg dst;

	switch (fdst->Register.File) {
	case TGSI_FILE_OUTPUT:
		dst = vpc->r_result[fdst->Register.Index];
		break;
	case TGSI_FILE_TEMPORARY:
		dst = vpc->r_temp[fdst->Register.Index];
		break;
	case TGSI_FILE_ADDRESS:
		dst = vpc->r_address[fdst->Register.Index];
		break;
	default:
		NOUVEAU_ERR("bad dst file\n");
		break;
	}

	return dst;
}

static INLINE int
tgsi_mask(uint tgsi)
{
	int mask = 0;

	if (tgsi & TGSI_WRITEMASK_X) mask |= NVFX_VP_MASK_X;
	if (tgsi & TGSI_WRITEMASK_Y) mask |= NVFX_VP_MASK_Y;
	if (tgsi & TGSI_WRITEMASK_Z) mask |= NVFX_VP_MASK_Z;
	if (tgsi & TGSI_WRITEMASK_W) mask |= NVFX_VP_MASK_W;

	return mask;
}

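/* Translate a single TGSI instruction.  The hardware can only read one
 * distinct input and one constant/immediate per instruction, so extra ones
 * are first copied into temporaries. */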
static boolean
nvfx_vertprog_parse_instruction(struct nvfx_context* nvfx, struct nvfx_vpc *vpc,
				const struct tgsi_full_instruction *finst)
{
	struct nvfx_sreg src[3], dst, tmp;
	struct nvfx_sreg none = nvfx_sr(NVFXSR_NONE, 0);
	int mask;
	int ai = -1, ci = -1, ii = -1;
	int i;

	if (finst->Instruction.Opcode == TGSI_OPCODE_END)
		return TRUE;

	for (i = 0; i < finst->Instruction.NumSrcRegs; i++) {
		const struct tgsi_full_src_register *fsrc;

		fsrc = &finst->Src[i];
		if (fsrc->Register.File == TGSI_FILE_TEMPORARY) {
			src[i] = tgsi_src(vpc, fsrc);
		}
	}

	for (i = 0; i < finst->Instruction.NumSrcRegs; i++) {
		const struct tgsi_full_src_register *fsrc;

		fsrc = &finst->Src[i];

		switch (fsrc->Register.File) {
		case TGSI_FILE_INPUT:
			if (ai == -1 || ai == fsrc->Register.Index) {
				ai = fsrc->Register.Index;
				src[i] = tgsi_src(vpc, fsrc);
			} else {
				src[i] = temp(vpc);
				arith(vpc, VEC, MOV, src[i], NVFX_VP_MASK_ALL,
				      tgsi_src(vpc, fsrc), none, none);
			}
			break;
		case TGSI_FILE_CONSTANT:
			if ((ci == -1 && ii == -1) ||
			    ci == fsrc->Register.Index) {
				ci = fsrc->Register.Index;
				src[i] = tgsi_src(vpc, fsrc);
			} else {
				src[i] = temp(vpc);
				arith(vpc, VEC, MOV, src[i], NVFX_VP_MASK_ALL,
				      tgsi_src(vpc, fsrc), none, none);
			}
			break;
		case TGSI_FILE_IMMEDIATE:
			if ((ci == -1 && ii == -1) ||
			    ii == fsrc->Register.Index) {
				ii = fsrc->Register.Index;
				src[i] = tgsi_src(vpc, fsrc);
			} else {
				src[i] = temp(vpc);
				arith(vpc, VEC, MOV, src[i], NVFX_VP_MASK_ALL,
				      tgsi_src(vpc, fsrc), none, none);
			}
			break;
		case TGSI_FILE_TEMPORARY:
			/* handled above */
			break;
		default:
			NOUVEAU_ERR("bad src file\n");
			return FALSE;
		}
	}

	dst = tgsi_dst(vpc, &finst->Dst[0]);
	mask = tgsi_mask(finst->Dst[0].Register.WriteMask);

	switch (finst->Instruction.Opcode) {
	case TGSI_OPCODE_ABS:
		arith(vpc, VEC, MOV, dst, mask, abs(src[0]), none, none);
		break;
	case TGSI_OPCODE_ADD:
		arith(vpc, VEC, ADD, dst, mask, src[0], none, src[1]);
		break;
	case TGSI_OPCODE_ARL:
		arith(vpc, VEC, ARL, dst, mask, src[0], none, none);
		break;
	case TGSI_OPCODE_DP3:
		arith(vpc, VEC, DP3, dst, mask, src[0], src[1], none);
		break;
	case TGSI_OPCODE_DP4:
		arith(vpc, VEC, DP4, dst, mask, src[0], src[1], none);
		break;
	case TGSI_OPCODE_DPH:
		arith(vpc, VEC, DPH, dst, mask, src[0], src[1], none);
		break;
	case TGSI_OPCODE_DST:
		arith(vpc, VEC, DST, dst, mask, src[0], src[1], none);
		break;
	case TGSI_OPCODE_EX2:
		arith(vpc, SCA, EX2, dst, mask, none, none, src[0]);
		break;
	case TGSI_OPCODE_EXP:
		arith(vpc, SCA, EXP, dst, mask, none, none, src[0]);
		break;
	case TGSI_OPCODE_FLR:
		arith(vpc, VEC, FLR, dst, mask, src[0], none, none);
		break;
	case TGSI_OPCODE_FRC:
		arith(vpc, VEC, FRC, dst, mask, src[0], none, none);
		break;
	case TGSI_OPCODE_LG2:
		arith(vpc, SCA, LG2, dst, mask, none, none, src[0]);
		break;
	case TGSI_OPCODE_LIT:
		arith(vpc, SCA, LIT, dst, mask, none, none, src[0]);
		break;
	case TGSI_OPCODE_LOG:
		arith(vpc, SCA, LOG, dst, mask, none, none, src[0]);
		break;
	case TGSI_OPCODE_MAD:
		arith(vpc, VEC, MAD, dst, mask, src[0], src[1], src[2]);
		break;
	case TGSI_OPCODE_MAX:
		arith(vpc, VEC, MAX, dst, mask, src[0], src[1], none);
		break;
	case TGSI_OPCODE_MIN:
		arith(vpc, VEC, MIN, dst, mask, src[0], src[1], none);
		break;
	case TGSI_OPCODE_MOV:
		arith(vpc, VEC, MOV, dst, mask, src[0], none, none);
		break;
	case TGSI_OPCODE_MUL:
		arith(vpc, VEC, MUL, dst, mask, src[0], src[1], none);
		break;
	case TGSI_OPCODE_POW:
		tmp = temp(vpc);
		arith(vpc, SCA, LG2, tmp, NVFX_VP_MASK_X, none, none,
		      swz(src[0], X, X, X, X));
		arith(vpc, VEC, MUL, tmp, NVFX_VP_MASK_X, swz(tmp, X, X, X, X),
		      swz(src[1], X, X, X, X), none);
		arith(vpc, SCA, EX2, dst, mask, none, none,
		      swz(tmp, X, X, X, X));
		break;
	case TGSI_OPCODE_RCP:
		arith(vpc, SCA, RCP, dst, mask, none, none, src[0]);
		break;
	case TGSI_OPCODE_RET:
		break;
	case TGSI_OPCODE_RSQ:
		arith(vpc, SCA, RSQ, dst, mask, none, none, abs(src[0]));
		break;
	case TGSI_OPCODE_SGE:
		arith(vpc, VEC, SGE, dst, mask, src[0], src[1], none);
		break;
	case TGSI_OPCODE_SGT:
		arith(vpc, VEC, SGT, dst, mask, src[0], src[1], none);
		break;
	case TGSI_OPCODE_SLT:
		arith(vpc, VEC, SLT, dst, mask, src[0], src[1], none);
		break;
	case TGSI_OPCODE_SUB:
		arith(vpc, VEC, ADD, dst, mask, src[0], none, neg(src[1]));
		break;
	case TGSI_OPCODE_XPD:
		tmp = temp(vpc);
		arith(vpc, VEC, MUL, tmp, mask,
		      swz(src[0], Z, X, Y, Y), swz(src[1], Y, Z, X, X), none);
		arith(vpc, VEC, MAD, dst, (mask & ~NVFX_VP_MASK_W),
		      swz(src[0], Y, Z, X, X), swz(src[1], Z, X, Y, Y),
		      neg(tmp));
		break;
	default:
		NOUVEAU_ERR("invalid opcode %d\n", finst->Instruction.Opcode);
		return FALSE;
	}

	release_temps(vpc);
	return TRUE;
}

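/* Map a TGSI output declaration to the corresponding hardware result slot. */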
static boolean
nvfx_vertprog_parse_decl_output(struct nvfx_context* nvfx, struct nvfx_vpc *vpc,
				const struct tgsi_full_declaration *fdec)
{
	unsigned idx = fdec->Range.First;
	int hw;

	switch (fdec->Semantic.Name) {
	case TGSI_SEMANTIC_POSITION:
		hw = NVFX_VP(INST_DEST_POS);
		vpc->hpos_idx = idx;
		break;
	case TGSI_SEMANTIC_COLOR:
		if (fdec->Semantic.Index == 0) {
			hw = NVFX_VP(INST_DEST_COL0);
		} else
		if (fdec->Semantic.Index == 1) {
			hw = NVFX_VP(INST_DEST_COL1);
		} else {
			NOUVEAU_ERR("bad colour semantic index\n");
			return FALSE;
		}
		break;
	case TGSI_SEMANTIC_BCOLOR:
		if (fdec->Semantic.Index == 0) {
			hw = NVFX_VP(INST_DEST_BFC0);
		} else
		if (fdec->Semantic.Index == 1) {
			hw = NVFX_VP(INST_DEST_BFC1);
		} else {
			NOUVEAU_ERR("bad bcolour semantic index\n");
			return FALSE;
		}
		break;
	case TGSI_SEMANTIC_FOG:
		hw = NVFX_VP(INST_DEST_FOGC);
		break;
	case TGSI_SEMANTIC_PSIZE:
		hw = NVFX_VP(INST_DEST_PSZ);
		break;
	case TGSI_SEMANTIC_GENERIC:
		if (fdec->Semantic.Index <= 7) {
			hw = NVFX_VP(INST_DEST_TC(fdec->Semantic.Index));
		} else {
			NOUVEAU_ERR("bad generic semantic index\n");
			return FALSE;
		}
		break;
	case TGSI_SEMANTIC_EDGEFLAG:
		/* not really an error just a fallback */
		NOUVEAU_ERR("cannot handle edgeflag output\n");
		return FALSE;
	default:
		NOUVEAU_ERR("bad output semantic\n");
		return FALSE;
	}

	vpc->r_result[idx] = nvfx_sr(NVFXSR_OUTPUT, hw);
	return TRUE;
}

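/* First pass over the token stream: count immediates, find the highest used
 * temporary and address register, and resolve output declarations. */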
static boolean
nvfx_vertprog_prepare(struct nvfx_context* nvfx, struct nvfx_vpc *vpc)
{
	struct tgsi_parse_context p;
	int high_temp = -1, high_addr = -1, nr_imm = 0, i;

	tgsi_parse_init(&p, vpc->vp->pipe.tokens);
	while (!tgsi_parse_end_of_tokens(&p)) {
		const union tgsi_full_token *tok = &p.FullToken;

		tgsi_parse_token(&p);
		switch(tok->Token.Type) {
		case TGSI_TOKEN_TYPE_IMMEDIATE:
			nr_imm++;
			break;
		case TGSI_TOKEN_TYPE_DECLARATION:
		{
			const struct tgsi_full_declaration *fdec;

			fdec = &p.FullToken.FullDeclaration;
			switch (fdec->Declaration.File) {
			case TGSI_FILE_TEMPORARY:
				if (fdec->Range.Last > high_temp) {
					high_temp =
						fdec->Range.Last;
				}
				break;
#if 0 /* this would be nice.. except gallium doesn't track it */
			case TGSI_FILE_ADDRESS:
				if (fdec->Range.Last > high_addr) {
					high_addr =
						fdec->Range.Last;
				}
				break;
#endif
			case TGSI_FILE_OUTPUT:
				if (!nvfx_vertprog_parse_decl_output(nvfx, vpc, fdec))
					return FALSE;
				break;
			default:
				break;
			}
		}
			break;
#if 1 /* yay, parse instructions looking for address regs instead */
		case TGSI_TOKEN_TYPE_INSTRUCTION:
		{
			const struct tgsi_full_instruction *finst;
			const struct tgsi_full_dst_register *fdst;

			finst = &p.FullToken.FullInstruction;
			fdst = &finst->Dst[0];

			if (fdst->Register.File == TGSI_FILE_ADDRESS) {
				if (fdst->Register.Index > high_addr)
					high_addr = fdst->Register.Index;
			}
		}
			break;
#endif
		default:
			break;
		}
	}
	tgsi_parse_free(&p);

	if (nr_imm) {
		vpc->imm = CALLOC(nr_imm, sizeof(struct nvfx_sreg));
		assert(vpc->imm);
	}

	if (++high_temp) {
		vpc->r_temp = CALLOC(high_temp, sizeof(struct nvfx_sreg));
		for (i = 0; i < high_temp; i++)
			vpc->r_temp[i] = temp(vpc);
	}

	if (++high_addr) {
		vpc->r_address = CALLOC(high_addr, sizeof(struct nvfx_sreg));
		for (i = 0; i < high_addr; i++)
			vpc->r_address[i] = temp(vpc);
	}

	vpc->r_temps_discard = 0;
	return TRUE;
}

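/* Translate the TGSI shader into hardware code, appending the HPOS write-back
 * and user clip plane instructions at the end. */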
static void
nvfx_vertprog_translate(struct nvfx_context *nvfx,
			struct nvfx_vertex_program *vp)
{
	struct tgsi_parse_context parse;
	struct nvfx_vpc *vpc = NULL;
	struct nvfx_sreg none = nvfx_sr(NVFXSR_NONE, 0);
	int i;

	vpc = CALLOC(1, sizeof(struct nvfx_vpc));
	if (!vpc)
		return;
	vpc->vp = vp;

	if (!nvfx_vertprog_prepare(nvfx, vpc)) {
		FREE(vpc);
		return;
	}

	/* Redirect post-transform vertex position to a temp if user clip
	 * planes are enabled.  We need to append code to the vtxprog
	 * to handle clip planes later.
	 */
	if (vp->ucp.nr) {
		vpc->r_result[vpc->hpos_idx] = temp(vpc);
		vpc->r_temps_discard = 0;
	}

	tgsi_parse_init(&parse, vp->pipe.tokens);

	while (!tgsi_parse_end_of_tokens(&parse)) {
		tgsi_parse_token(&parse);

		switch (parse.FullToken.Token.Type) {
		case TGSI_TOKEN_TYPE_IMMEDIATE:
		{
			const struct tgsi_full_immediate *imm;

			imm = &parse.FullToken.FullImmediate;
			assert(imm->Immediate.DataType == TGSI_IMM_FLOAT32);
			assert(imm->Immediate.NrTokens == 4 + 1);
			vpc->imm[vpc->nr_imm++] =
				constant(vpc, -1,
					 imm->u[0].Float,
					 imm->u[1].Float,
					 imm->u[2].Float,
					 imm->u[3].Float);
		}
			break;
		case TGSI_TOKEN_TYPE_INSTRUCTION:
		{
			const struct tgsi_full_instruction *finst;
			finst = &parse.FullToken.FullInstruction;
			if (!nvfx_vertprog_parse_instruction(nvfx, vpc, finst))
				goto out_err;
		}
			break;
		default:
			break;
		}
	}

	/* Write out HPOS if it was redirected to a temp earlier */
	if (vpc->r_result[vpc->hpos_idx].type != NVFXSR_OUTPUT) {
		struct nvfx_sreg hpos = nvfx_sr(NVFXSR_OUTPUT,
						NVFX_VP(INST_DEST_POS));
		struct nvfx_sreg htmp = vpc->r_result[vpc->hpos_idx];

		arith(vpc, VEC, MOV, hpos, NVFX_VP_MASK_ALL, htmp, none, none);
	}

	/* Insert code to handle user clip planes */
	for (i = 0; i < vp->ucp.nr; i++) {
		struct nvfx_sreg cdst = nvfx_sr(NVFXSR_OUTPUT,
						NVFX_VP_INST_DEST_CLIP(i));
		struct nvfx_sreg ceqn = constant(vpc, -1,
						 nvfx->clip.ucp[i][0],
						 nvfx->clip.ucp[i][1],
						 nvfx->clip.ucp[i][2],
						 nvfx->clip.ucp[i][3]);
		struct nvfx_sreg htmp = vpc->r_result[vpc->hpos_idx];
		unsigned mask;

		switch (i) {
		case 0: case 3: mask = NVFX_VP_MASK_Y; break;
		case 1: case 4: mask = NVFX_VP_MASK_Z; break;
		case 2: case 5: mask = NVFX_VP_MASK_W; break;
		default:
			NOUVEAU_ERR("invalid clip dist #%d\n", i);
			goto out_err;
		}

		arith(vpc, VEC, DP4, cdst, mask, htmp, ceqn, none);
	}

	vp->insns[vp->nr_insns - 1].data[3] |= NVFX_VP_INST_LAST;
	vp->translated = TRUE;
out_err:
	tgsi_parse_free(&parse);
	if (vpc->r_temp)
		FREE(vpc->r_temp);
	if (vpc->r_address)
		FREE(vpc->r_address);
	if (vpc->imm)
		FREE(vpc->imm);
	FREE(vpc);
}

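/* Validate hook: (re)translate the vertex program if needed, allocate space
 * in the hardware exec/const heaps, patch relocated instructions and upload
 * both the constants and the program itself. */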
static boolean
nvfx_vertprog_validate(struct nvfx_context *nvfx)
{
	struct pipe_screen *pscreen = nvfx->pipe.screen;
	struct nvfx_screen *screen = nvfx->screen;
	struct nouveau_channel *chan = screen->base.channel;
	struct nouveau_grobj *eng3d = screen->eng3d;
	struct nvfx_vertex_program *vp;
	struct pipe_buffer *constbuf;
	boolean upload_code = FALSE, upload_data = FALSE;
	int i;

	if (nvfx->render_mode == HW) {
		vp = nvfx->vertprog;
		constbuf = nvfx->constbuf[PIPE_SHADER_VERTEX];

		if ((nvfx->dirty & NVFX_NEW_UCP) ||
		    memcmp(&nvfx->clip, &vp->ucp, sizeof(vp->ucp))) {
			nvfx_vertprog_destroy(nvfx, vp);
			memcpy(&vp->ucp, &nvfx->clip, sizeof(vp->ucp));
		}
	} else {
		vp = nvfx->swtnl.vertprog;
		constbuf = NULL;
	}

	/* Translate TGSI shader into hw bytecode */
	if (vp->translated)
		goto check_gpu_resources;

	nvfx->fallback_swtnl &= ~NVFX_NEW_VERTPROG;
	nvfx_vertprog_translate(nvfx, vp);
	if (!vp->translated) {
		nvfx->fallback_swtnl |= NVFX_NEW_VERTPROG;
		return FALSE;
	}

check_gpu_resources:
	/* Allocate hw vtxprog exec slots */
	if (!vp->exec) {
		struct nouveau_resource *heap = nvfx->screen->vp_exec_heap;
		struct nouveau_stateobj *so;
		uint vplen = vp->nr_insns;

		if (nouveau_resource_alloc(heap, vplen, vp, &vp->exec)) {
			while (heap->next && heap->size < vplen) {
				struct nvfx_vertex_program *evict;

				evict = heap->next->priv;
				nouveau_resource_free(&evict->exec);
			}

			if (nouveau_resource_alloc(heap, vplen, vp, &vp->exec))
				assert(0);
		}

		so = so_new(3, 4, 0);
		so_method(so, eng3d, NV34TCL_VP_START_FROM_ID, 1);
		so_data  (so, vp->exec->start);
		if(nvfx->is_nv4x) {
			so_method(so, eng3d, NV40TCL_VP_ATTRIB_EN, 2);
			so_data  (so, vp->ir);
			so_data  (so, vp->or);
		}
		so_method(so, eng3d, NV34TCL_VP_CLIP_PLANES_ENABLE, 1);
		so_data  (so, vp->clip_ctrl);
		so_ref(so, &vp->so);
		so_ref(NULL, &so);

		upload_code = TRUE;
	}

	/* Allocate hw vtxprog const slots */
	if (vp->nr_consts && !vp->data) {
		struct nouveau_resource *heap = nvfx->screen->vp_data_heap;

		if (nouveau_resource_alloc(heap, vp->nr_consts, vp, &vp->data)) {
			while (heap->next && heap->size < vp->nr_consts) {
				struct nvfx_vertex_program *evict;

				evict = heap->next->priv;
				nouveau_resource_free(&evict->data);
			}

			if (nouveau_resource_alloc(heap, vp->nr_consts, vp, &vp->data))
				assert(0);
		}

		/*XXX: handle this some day */
		assert(vp->data->start >= vp->data_start_min);

		upload_data = TRUE;
		if (vp->data_start != vp->data->start)
			upload_code = TRUE;
	}

	/* If exec or data segments moved we need to patch the program to
	 * fixup offsets and register IDs.
	 */
	if (vp->exec_start != vp->exec->start) {
		for (i = 0; i < vp->nr_insns; i++) {
			struct nvfx_vertex_program_exec *vpi = &vp->insns[i];

			if (vpi->has_branch_offset) {
				assert(0);
			}
		}

		vp->exec_start = vp->exec->start;
	}

	if (vp->nr_consts && vp->data_start != vp->data->start) {
		for (i = 0; i < vp->nr_insns; i++) {
			struct nvfx_vertex_program_exec *vpi = &vp->insns[i];

			if (vpi->const_index >= 0) {
				vpi->data[1] &= ~NVFX_VP(INST_CONST_SRC_MASK);
				vpi->data[1] |=
					(vpi->const_index + vp->data->start) <<
					NVFX_VP(INST_CONST_SRC_SHIFT);
			}
		}

		vp->data_start = vp->data->start;
	}

	/* Update + Upload constant values */
	if (vp->nr_consts) {
		float *map = NULL;

		if (constbuf) {
			map = pipe_buffer_map(pscreen, constbuf,
					      PIPE_BUFFER_USAGE_CPU_READ);
		}

		for (i = 0; i < vp->nr_consts; i++) {
			struct nvfx_vertex_program_data *vpd = &vp->consts[i];

			if (vpd->index >= 0) {
				if (!upload_data &&
				    !memcmp(vpd->value, &map[vpd->index * 4],
					    4 * sizeof(float)))
					continue;
				memcpy(vpd->value, &map[vpd->index * 4],
				       4 * sizeof(float));
			}

			BEGIN_RING(chan, eng3d, NV34TCL_VP_UPLOAD_CONST_ID, 5);
			OUT_RING  (chan, i + vp->data->start);
			OUT_RINGp (chan, (uint32_t *)vpd->value, 4);
		}

		if (constbuf)
			pipe_buffer_unmap(pscreen, constbuf);
	}

	/* Upload vtxprog */
	if (upload_code) {
#if 0
		for (i = 0; i < vp->nr_insns; i++) {
			NOUVEAU_MSG("VP %d: 0x%08x\n", i, vp->insns[i].data[0]);
			NOUVEAU_MSG("VP %d: 0x%08x\n", i, vp->insns[i].data[1]);
			NOUVEAU_MSG("VP %d: 0x%08x\n", i, vp->insns[i].data[2]);
			NOUVEAU_MSG("VP %d: 0x%08x\n", i, vp->insns[i].data[3]);
		}
#endif
		BEGIN_RING(chan, eng3d, NV34TCL_VP_UPLOAD_FROM_ID, 1);
		OUT_RING  (chan, vp->exec->start);
		for (i = 0; i < vp->nr_insns; i++) {
			BEGIN_RING(chan, eng3d, NV34TCL_VP_UPLOAD_INST(0), 4);
			OUT_RINGp (chan, vp->insns[i].data, 4);
		}
	}

	if (vp->so != nvfx->state.hw[NVFX_STATE_VERTPROG]) {
		so_ref(vp->so, &nvfx->state.hw[NVFX_STATE_VERTPROG]);
		return TRUE;
	}

	return FALSE;
}

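/* Release everything the translation and validation steps allocated. */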
void
nvfx_vertprog_destroy(struct nvfx_context *nvfx, struct nvfx_vertex_program *vp)
{
	vp->translated = FALSE;

	if (vp->nr_insns) {
		FREE(vp->insns);
		vp->insns = NULL;
		vp->nr_insns = 0;
	}

	if (vp->nr_consts) {
		FREE(vp->consts);
		vp->consts = NULL;
		vp->nr_consts = 0;
	}

	nouveau_resource_free(&vp->exec);
	vp->exec_start = 0;
	nouveau_resource_free(&vp->data);
	vp->data_start = 0;
	vp->data_start_min = 0;

	vp->ir = vp->or = vp->clip_ctrl = 0;
	so_ref(NULL, &vp->so);
}

struct nvfx_state_entry nvfx_state_vertprog = {
	.validate = nvfx_vertprog_validate,
	.dirty = {
		.pipe = NVFX_NEW_VERTPROG | NVFX_NEW_UCP,
		.hw = NVFX_STATE_VERTPROG,
	}
};