2 #include "pipe/p_context.h"
3 #include "pipe/p_defines.h"
4 #include "pipe/p_state.h"
5 #include "util/u_inlines.h"
6 #include "util/u_debug.h"
8 #include "pipe/p_shader_tokens.h"
9 #include "tgsi/tgsi_parse.h"
10 #include "tgsi/tgsi_util.h"
11 #include "tgsi/tgsi_dump.h"
12 #include "tgsi/tgsi_ureg.h"
14 #include "nvfx_context.h"
15 #include "nvfx_shader.h"
16 #include "nvfx_resource.h"
struct nvfx_fpc {
    struct nvfx_pipe_fragment_program* pfp;
    struct nvfx_fragment_program *fp;

    unsigned max_temps;
    unsigned long long r_temps;
    unsigned long long r_temps_discard;
    struct nvfx_reg r_result[PIPE_MAX_SHADER_OUTPUTS];
    struct nvfx_reg *r_temp;
    unsigned sprite_coord_temp;

    int num_regs;

    unsigned inst_offset;
    unsigned have_const;

    struct util_dynarray imm_data;

    struct nvfx_reg* r_imm;
    unsigned nr_imm;

    unsigned char generic_to_slot[256]; /* semantic idx for each input semantic */

    struct util_dynarray if_stack;
    //struct util_dynarray loop_stack;
    struct util_dynarray label_relocs;
};
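
/* Temporary allocation uses a 64-bit occupancy bitmask: __builtin_ctzll(~r_temps)
 * picks the lowest free register, and r_temps_discard marks registers that are
 * released in bulk at the end of each TGSI instruction (see release_temps below).
 */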
static INLINE struct nvfx_reg
temp(struct nvfx_fpc *fpc)
{
    int idx = __builtin_ctzll(~fpc->r_temps);

    if (idx >= fpc->max_temps) {
        NOUVEAU_ERR("out of temps!!\n");
        assert(0);
        return nvfx_reg(NVFXSR_TEMP, 0);
    }

    fpc->r_temps |= (1ULL << idx);
    fpc->r_temps_discard |= (1ULL << idx);
    return nvfx_reg(NVFXSR_TEMP, idx);
}
static INLINE void
release_temps(struct nvfx_fpc *fpc)
{
    fpc->r_temps &= ~fpc->r_temps_discard;
    fpc->r_temps_discard = 0ULL;
}
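
/* Immediates accumulate in imm_data as vec4s (16 bytes each), so the index of
 * the vec4 being appended is the current byte size shifted right by 4. The
 * actual values are copied into the instruction stream by emit_src when used.
 */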
static inline struct nvfx_reg
nvfx_fp_imm(struct nvfx_fpc *fpc, float a, float b, float c, float d)
{
    float v[4] = {a, b, c, d};
    int idx = fpc->imm_data.size >> 4;

    memcpy(util_dynarray_grow(&fpc->imm_data, sizeof(float) * 4), v, 4 * sizeof(float));
    return nvfx_reg(NVFXSR_IMM, idx);
}
static void
grow_insns(struct nvfx_fpc *fpc, int size)
{
    struct nvfx_fragment_program *fp = fpc->fp;

    fp->insn_len += size;
    fp->insn = realloc(fp->insn, sizeof(uint32_t) * fp->insn_len);
}
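
/* Each hardware instruction is 4 dwords: dword 0 holds the opcode, output
 * register and input source, dwords 1-3 hold the three source operands.
 * Constant and immediate sources need an extra 4-dword payload appended right
 * after the instruction, which is why emit_src calls grow_insns(fpc, 4) and
 * re-fetches hw (realloc may have moved the buffer).
 */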
static void
emit_src(struct nvfx_fpc *fpc, int pos, struct nvfx_src src)
{
    struct nvfx_fragment_program *fp = fpc->fp;
    uint32_t *hw = &fp->insn[fpc->inst_offset];
    uint32_t sr = 0;

    switch (src.reg.type) {
    case NVFXSR_INPUT:
        sr |= (NVFX_FP_REG_TYPE_INPUT << NVFX_FP_REG_TYPE_SHIFT);
        hw[0] |= (src.reg.index << NVFX_FP_OP_INPUT_SRC_SHIFT);
        break;
    case NVFXSR_OUTPUT:
        sr |= NVFX_FP_REG_SRC_HALF;
        /* fall-through */
    case NVFXSR_TEMP:
        sr |= (NVFX_FP_REG_TYPE_TEMP << NVFX_FP_REG_TYPE_SHIFT);
        sr |= (src.reg.index << NVFX_FP_REG_SRC_SHIFT);
        break;
    case NVFXSR_RELOCATED:
        sr |= (NVFX_FP_REG_TYPE_TEMP << NVFX_FP_REG_TYPE_SHIFT);
        sr |= (fpc->sprite_coord_temp << NVFX_FP_REG_SRC_SHIFT);
        //printf("adding relocation at %x for %x\n", fpc->inst_offset, src.index);
        util_dynarray_append(&fpc->fp->slot_relocations[src.reg.index], unsigned, fpc->inst_offset + pos + 1);
        break;
    case NVFXSR_IMM:
        if (!fpc->have_const) {
            grow_insns(fpc, 4);
            hw = &fp->insn[fpc->inst_offset];
            fpc->have_const = 1;
        }

        memcpy(&fp->insn[fpc->inst_offset + 4],
                (float*)fpc->imm_data.data + src.reg.index * 4,
                sizeof(uint32_t) * 4);

        sr |= (NVFX_FP_REG_TYPE_CONST << NVFX_FP_REG_TYPE_SHIFT);
        break;
    case NVFXSR_CONST:
        if (!fpc->have_const) {
            grow_insns(fpc, 4);
            hw = &fp->insn[fpc->inst_offset];
            fpc->have_const = 1;
        }

        {
            struct nvfx_fragment_program_data *fpd;

            fp->consts = realloc(fp->consts, ++fp->nr_consts *
                                 sizeof(*fpd));
            fpd = &fp->consts[fp->nr_consts - 1];
            fpd->offset = fpc->inst_offset + 4;
            fpd->index = src.reg.index;
            memset(&fp->insn[fpd->offset], 0, sizeof(uint32_t) * 4);
        }

        sr |= (NVFX_FP_REG_TYPE_CONST << NVFX_FP_REG_TYPE_SHIFT);
        break;
    case NVFXSR_NONE:
        sr |= (NVFX_FP_REG_TYPE_INPUT << NVFX_FP_REG_TYPE_SHIFT);
        break;
    default:
        assert(0);
    }

    if (src.negate)
        sr |= NVFX_FP_REG_NEGATE;

    if (src.abs)
        hw[1] |= (1 << (29 + pos));

    sr |= ((src.swz[0] << NVFX_FP_REG_SWZ_X_SHIFT) |
           (src.swz[1] << NVFX_FP_REG_SWZ_Y_SHIFT) |
           (src.swz[2] << NVFX_FP_REG_SWZ_Z_SHIFT) |
           (src.swz[3] << NVFX_FP_REG_SWZ_W_SHIFT));

    hw[pos + 1] |= sr;
}
static void
emit_dst(struct nvfx_fpc *fpc, struct nvfx_reg dst)
{
    struct nvfx_fragment_program *fp = fpc->fp;
    uint32_t *hw = &fp->insn[fpc->inst_offset];

    switch (dst.type) {
    case NVFXSR_TEMP:
        if (fpc->num_regs < (dst.index + 1))
            fpc->num_regs = dst.index + 1;
        break;
    case NVFXSR_OUTPUT:
        if (dst.index == 1) {
            fp->fp_control |= 0xe;
        } else {
            hw[0] |= NVFX_FP_OP_OUT_REG_HALF;
        }
        break;
    case NVFXSR_NONE:
        hw[0] |= (1 << 30);
        break;
    default:
        assert(0);
    }

    hw[0] |= (dst.index << NVFX_FP_OP_OUT_REG_SHIFT);
}
static void
nvfx_fp_emit(struct nvfx_fpc *fpc, struct nvfx_insn insn)
{
    struct nvfx_fragment_program *fp = fpc->fp;
    uint32_t *hw;

    fpc->inst_offset = fp->insn_len;
    fpc->have_const = 0;
    grow_insns(fpc, 4);
    hw = &fp->insn[fpc->inst_offset];
    memset(hw, 0, sizeof(uint32_t) * 4);

    if (insn.op == NVFX_FP_OP_OPCODE_KIL)
        fp->fp_control |= NV30_3D_FP_CONTROL_USES_KIL;
    hw[0] |= (insn.op << NVFX_FP_OP_OPCODE_SHIFT);
    hw[0] |= (insn.mask << NVFX_FP_OP_OUTMASK_SHIFT);
    hw[2] |= (insn.scale << NVFX_FP_OP_DST_SCALE_SHIFT);

    if (insn.sat)
        hw[0] |= NVFX_FP_OP_OUT_SAT;

    if (insn.cc_update)
        hw[0] |= NVFX_FP_OP_COND_WRITE_ENABLE;
    hw[1] |= (insn.cc_test << NVFX_FP_OP_COND_SHIFT);
    hw[1] |= ((insn.cc_swz[0] << NVFX_FP_OP_COND_SWZ_X_SHIFT) |
              (insn.cc_swz[1] << NVFX_FP_OP_COND_SWZ_Y_SHIFT) |
              (insn.cc_swz[2] << NVFX_FP_OP_COND_SWZ_Z_SHIFT) |
              (insn.cc_swz[3] << NVFX_FP_OP_COND_SWZ_W_SHIFT));

    if(insn.unit >= 0)
    {
        hw[0] |= (insn.unit << NVFX_FP_OP_TEX_UNIT_SHIFT);
        fp->samplers |= (1 << insn.unit);
    }

    emit_dst(fpc, insn.dst);
    emit_src(fpc, 0, insn.src[0]);
    emit_src(fpc, 1, insn.src[1]);
    emit_src(fpc, 2, insn.src[2]);
}
#define arith(s,o,d,m,s0,s1,s2) \
    nvfx_insn((s), NVFX_FP_OP_OPCODE_##o, -1, \
            (d), (m), (s0), (s1), (s2))

#define tex(s,o,u,d,m,s0,s1,s2) \
    nvfx_insn((s), NVFX_FP_OP_OPCODE_##o, (u), \
            (d), (m), (s0), none, none)
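
/* nv40 control flow: IF pushes the offset of its own instruction on if_stack
 * so that ELSE/ENDIF can patch the else/endif offsets into hw[2]/hw[3] later;
 * CAL/REP/BRA instead record a (target label, dword location) pair in
 * label_relocs, which nvfx_fragprog_translate resolves once the offset of
 * every TGSI instruction is known.
 */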
/* IF src.x != 0, as TGSI specifies */
static void
nv40_fp_if(struct nvfx_fpc *fpc, struct nvfx_src src)
{
    const struct nvfx_src none = nvfx_src(nvfx_reg(NVFXSR_NONE, 0));
    struct nvfx_insn insn = arith(0, MOV, none.reg, NVFX_FP_MASK_X, src, none, none);
    uint32_t *hw;
    insn.cc_update = 1;
    nvfx_fp_emit(fpc, insn);

    fpc->inst_offset = fpc->fp->insn_len;
    grow_insns(fpc, 4);
    hw = &fpc->fp->insn[fpc->inst_offset];
    /* I really wonder why fp16 precision is used. Presumably the hardware ignores it? */
    hw[0] = (NV40_FP_OP_BRA_OPCODE_IF << NVFX_FP_OP_OPCODE_SHIFT) |
        NV40_FP_OP_OUT_NONE |
        (NVFX_FP_PRECISION_FP16 << NVFX_FP_OP_PRECISION_SHIFT);
    /* Use .xxxx swizzle so that we check only src[0].x */
    hw[1] = (0 << NVFX_FP_OP_COND_SWZ_X_SHIFT) |
            (0 << NVFX_FP_OP_COND_SWZ_Y_SHIFT) |
            (0 << NVFX_FP_OP_COND_SWZ_Z_SHIFT) |
            (0 << NVFX_FP_OP_COND_SWZ_W_SHIFT) |
            (NVFX_FP_OP_COND_NE << NVFX_FP_OP_COND_SHIFT);
    hw[2] = 0; /* | NV40_FP_OP_OPCODE_IS_BRANCH | else_offset */
    hw[3] = 0; /* | endif_offset */
    util_dynarray_append(&fpc->if_stack, unsigned, fpc->inst_offset);
}
/* unconditional CAL to a label; the call offset is fixed up later via label_relocs */
static void
nv40_fp_cal(struct nvfx_fpc *fpc, unsigned target)
{
    struct nvfx_relocation reloc;
    uint32_t *hw;
    fpc->inst_offset = fpc->fp->insn_len;
    grow_insns(fpc, 4);
    hw = &fpc->fp->insn[fpc->inst_offset];
    /* I really wonder why fp16 precision is used. Presumably the hardware ignores it? */
    hw[0] = (NV40_FP_OP_BRA_OPCODE_CAL << NVFX_FP_OP_OPCODE_SHIFT);
    /* condition code is always true (TR), with identity swizzle */
    hw[1] = (NVFX_SWZ_IDENTITY << NVFX_FP_OP_COND_SWZ_ALL_SHIFT) |
            (NVFX_FP_OP_COND_TR << NVFX_FP_OP_COND_SHIFT);
    hw[2] = NV40_FP_OP_OPCODE_IS_BRANCH; /* | call_offset */
    hw[3] = 0;
    reloc.target = target;
    reloc.location = fpc->inst_offset + 2;
    util_dynarray_append(&fpc->label_relocs, struct nvfx_relocation, reloc);
}
static void
nv40_fp_ret(struct nvfx_fpc *fpc)
{
    uint32_t *hw;
    fpc->inst_offset = fpc->fp->insn_len;
    grow_insns(fpc, 4);
    hw = &fpc->fp->insn[fpc->inst_offset];
    /* I really wonder why fp16 precision is used. Presumably the hardware ignores it? */
    hw[0] = (NV40_FP_OP_BRA_OPCODE_RET << NVFX_FP_OP_OPCODE_SHIFT);
    /* condition code is always true (TR), with identity swizzle */
    hw[1] = (NVFX_SWZ_IDENTITY << NVFX_FP_OP_COND_SWZ_ALL_SHIFT) |
            (NVFX_FP_OP_COND_TR << NVFX_FP_OP_COND_SHIFT);
    hw[2] = NV40_FP_OP_OPCODE_IS_BRANCH;
    hw[3] = 0;
}
static void
nv40_fp_rep(struct nvfx_fpc *fpc, unsigned count, unsigned target)
{
    struct nvfx_relocation reloc;
    uint32_t *hw;
    fpc->inst_offset = fpc->fp->insn_len;
    grow_insns(fpc, 4);
    hw = &fpc->fp->insn[fpc->inst_offset];
    /* I really wonder why fp16 precision is used. Presumably the hardware ignores it? */
    hw[0] = (NV40_FP_OP_BRA_OPCODE_REP << NVFX_FP_OP_OPCODE_SHIFT) |
            NV40_FP_OP_OUT_NONE |
            (NVFX_FP_PRECISION_FP16 << NVFX_FP_OP_PRECISION_SHIFT);
    /* condition code is always true (TR), with identity swizzle */
    hw[1] = (NVFX_SWZ_IDENTITY << NVFX_FP_OP_COND_SWZ_ALL_SHIFT) |
            (NVFX_FP_OP_COND_TR << NVFX_FP_OP_COND_SHIFT);
    hw[2] = NV40_FP_OP_OPCODE_IS_BRANCH |
            (count << NV40_FP_OP_REP_COUNT1_SHIFT) |
            (count << NV40_FP_OP_REP_COUNT2_SHIFT) |
            (count << NV40_FP_OP_REP_COUNT3_SHIFT);
    hw[3] = 0; /* | end_offset */
    reloc.target = target;
    reloc.location = fpc->inst_offset + 3;
    util_dynarray_append(&fpc->label_relocs, struct nvfx_relocation, reloc);
    //util_dynarray_append(&fpc->loop_stack, unsigned, target);
}
/* warning: this only works forward, and probably only if not inside any IF */
static void
nv40_fp_bra(struct nvfx_fpc *fpc, unsigned target)
{
    struct nvfx_relocation reloc;
    uint32_t *hw;
    fpc->inst_offset = fpc->fp->insn_len;
    grow_insns(fpc, 4);
    hw = &fpc->fp->insn[fpc->inst_offset];
    /* I really wonder why fp16 precision is used. Presumably the hardware ignores it? */
    hw[0] = (NV40_FP_OP_BRA_OPCODE_IF << NVFX_FP_OP_OPCODE_SHIFT) |
        NV40_FP_OP_OUT_NONE |
        (NVFX_FP_PRECISION_FP16 << NVFX_FP_OP_PRECISION_SHIFT);
    /* an IF with a false (FL) condition: both the else and endif offsets point
     * at the target, so this acts as an unconditional branch */
    hw[1] = (NVFX_SWZ_IDENTITY << NVFX_FP_OP_COND_SWZ_X_SHIFT) |
            (NVFX_FP_OP_COND_FL << NVFX_FP_OP_COND_SHIFT);
    hw[2] = NV40_FP_OP_OPCODE_IS_BRANCH; /* | else_offset */
    hw[3] = 0; /* | endif_offset */
    reloc.target = target;
    reloc.location = fpc->inst_offset + 2;
    util_dynarray_append(&fpc->label_relocs, struct nvfx_relocation, reloc);
    reloc.target = target;
    reloc.location = fpc->inst_offset + 3;
    util_dynarray_append(&fpc->label_relocs, struct nvfx_relocation, reloc);
}
static void
nv40_fp_brk(struct nvfx_fpc *fpc)
{
    uint32_t *hw;
    fpc->inst_offset = fpc->fp->insn_len;
    grow_insns(fpc, 4);
    hw = &fpc->fp->insn[fpc->inst_offset];
    /* I really wonder why fp16 precision is used. Presumably the hardware ignores it? */
    hw[0] = (NV40_FP_OP_BRA_OPCODE_BRK << NVFX_FP_OP_OPCODE_SHIFT) |
        NV40_FP_OP_OUT_NONE;
    /* condition code is always true (TR), with identity swizzle */
    hw[1] = (NVFX_SWZ_IDENTITY << NVFX_FP_OP_COND_SWZ_X_SHIFT) |
            (NVFX_FP_OP_COND_TR << NVFX_FP_OP_COND_SHIFT);
    hw[2] = NV40_FP_OP_OPCODE_IS_BRANCH;
    hw[3] = 0;
}
static INLINE struct nvfx_src
tgsi_src(struct nvfx_fpc *fpc, const struct tgsi_full_src_register *fsrc)
{
    struct nvfx_src src;

    switch (fsrc->Register.File) {
    case TGSI_FILE_INPUT:
        if(fpc->pfp->info.input_semantic_name[fsrc->Register.Index] == TGSI_SEMANTIC_POSITION) {
            assert(fpc->pfp->info.input_semantic_index[fsrc->Register.Index] == 0);
            src.reg = nvfx_reg(NVFXSR_INPUT, NVFX_FP_OP_INPUT_SRC_POSITION);
        } else if(fpc->pfp->info.input_semantic_name[fsrc->Register.Index] == TGSI_SEMANTIC_COLOR) {
            if(fpc->pfp->info.input_semantic_index[fsrc->Register.Index] == 0)
                src.reg = nvfx_reg(NVFXSR_INPUT, NVFX_FP_OP_INPUT_SRC_COL0);
            else if(fpc->pfp->info.input_semantic_index[fsrc->Register.Index] == 1)
                src.reg = nvfx_reg(NVFXSR_INPUT, NVFX_FP_OP_INPUT_SRC_COL1);
            else
                assert(0);
        } else if(fpc->pfp->info.input_semantic_name[fsrc->Register.Index] == TGSI_SEMANTIC_FOG) {
            assert(fpc->pfp->info.input_semantic_index[fsrc->Register.Index] == 0);
            src.reg = nvfx_reg(NVFXSR_INPUT, NVFX_FP_OP_INPUT_SRC_FOGC);
        } else if(fpc->pfp->info.input_semantic_name[fsrc->Register.Index] == TGSI_SEMANTIC_FACE) {
            /* TODO: check this has the correct values */
            /* XXX: what do we do for nv30 here (assuming it lacks facing)?! */
            assert(fpc->pfp->info.input_semantic_index[fsrc->Register.Index] == 0);
            src.reg = nvfx_reg(NVFXSR_INPUT, NV40_FP_OP_INPUT_SRC_FACING);
        } else {
            assert(fpc->pfp->info.input_semantic_name[fsrc->Register.Index] == TGSI_SEMANTIC_GENERIC);
            src.reg = nvfx_reg(NVFXSR_RELOCATED, fpc->generic_to_slot[fpc->pfp->info.input_semantic_index[fsrc->Register.Index]]);
        }
        break;
    case TGSI_FILE_CONSTANT:
        src.reg = nvfx_reg(NVFXSR_CONST, fsrc->Register.Index);
        break;
    case TGSI_FILE_IMMEDIATE:
        assert(fsrc->Register.Index < fpc->nr_imm);
        src.reg = fpc->r_imm[fsrc->Register.Index];
        break;
    case TGSI_FILE_TEMPORARY:
        src.reg = fpc->r_temp[fsrc->Register.Index];
        break;
    /* NV40 fragprog result regs are just temps, so this is simple */
    case TGSI_FILE_OUTPUT:
        src.reg = fpc->r_result[fsrc->Register.Index];
        break;
    default:
        NOUVEAU_ERR("bad src file\n");
        src.reg.index = 0;
        src.reg.type = 0;
        break;
    }

    src.abs = fsrc->Register.Absolute;
    src.negate = fsrc->Register.Negate;
    src.swz[0] = fsrc->Register.SwizzleX;
    src.swz[1] = fsrc->Register.SwizzleY;
    src.swz[2] = fsrc->Register.SwizzleZ;
    src.swz[3] = fsrc->Register.SwizzleW;
    src.indirect = 0;
    src.indirect_reg = 0;
    src.indirect_swz = 0;
    return src;
}
static INLINE struct nvfx_reg
tgsi_dst(struct nvfx_fpc *fpc, const struct tgsi_full_dst_register *fdst) {
    switch (fdst->Register.File) {
    case TGSI_FILE_OUTPUT:
        return fpc->r_result[fdst->Register.Index];
    case TGSI_FILE_TEMPORARY:
        return fpc->r_temp[fdst->Register.Index];
    case TGSI_FILE_NULL:
        return nvfx_reg(NVFXSR_NONE, 0);
    default:
        NOUVEAU_ERR("bad dst file %d\n", fdst->Register.File);
        return nvfx_reg(NVFXSR_NONE, 0);
    }
}
static INLINE int
tgsi_mask(uint tgsi)
{
    int mask = 0;

    if (tgsi & TGSI_WRITEMASK_X) mask |= NVFX_FP_MASK_X;
    if (tgsi & TGSI_WRITEMASK_Y) mask |= NVFX_FP_MASK_Y;
    if (tgsi & TGSI_WRITEMASK_Z) mask |= NVFX_FP_MASK_Z;
    if (tgsi & TGSI_WRITEMASK_W) mask |= NVFX_FP_MASK_W;
    return mask;
}
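
/* An instruction may read at most one distinct input register and one distinct
 * constant/immediate (tracked by ai, ci and ii below); any additional distinct
 * source of the same kind is first copied into a temporary with a MOV.
 */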
static boolean
nvfx_fragprog_parse_instruction(struct nvfx_context* nvfx, struct nvfx_fpc *fpc,
                const struct tgsi_full_instruction *finst)
{
    const struct nvfx_src none = nvfx_src(nvfx_reg(NVFXSR_NONE, 0));
    struct nvfx_insn insn;
    struct nvfx_src src[3], tmp;
    struct nvfx_reg dst;
    int mask, sat, unit = 0;
    int ai = -1, ci = -1, ii = -1;
    int i;

    if (finst->Instruction.Opcode == TGSI_OPCODE_END)
        return TRUE;

    for (i = 0; i < finst->Instruction.NumSrcRegs; i++) {
        const struct tgsi_full_src_register *fsrc;

        fsrc = &finst->Src[i];
        if (fsrc->Register.File == TGSI_FILE_TEMPORARY) {
            src[i] = tgsi_src(fpc, fsrc);
        }
    }
    for (i = 0; i < finst->Instruction.NumSrcRegs; i++) {
        const struct tgsi_full_src_register *fsrc;

        fsrc = &finst->Src[i];

        switch (fsrc->Register.File) {
        case TGSI_FILE_INPUT:
            if(fpc->pfp->info.input_semantic_name[fsrc->Register.Index] == TGSI_SEMANTIC_FOG && (0
                    || fsrc->Register.SwizzleX == PIPE_SWIZZLE_ALPHA
                    || fsrc->Register.SwizzleY == PIPE_SWIZZLE_ALPHA
                    || fsrc->Register.SwizzleZ == PIPE_SWIZZLE_ALPHA
                    || fsrc->Register.SwizzleW == PIPE_SWIZZLE_ALPHA
                    )) {
                /* hardware puts 0 in fogcoord.w, but GL/Gallium want 1 there */
                struct nvfx_src addend = nvfx_src(nvfx_fp_imm(fpc, 0, 0, 0, 1));
                addend.swz[0] = fsrc->Register.SwizzleX;
                addend.swz[1] = fsrc->Register.SwizzleY;
                addend.swz[2] = fsrc->Register.SwizzleZ;
                addend.swz[3] = fsrc->Register.SwizzleW;
                src[i] = nvfx_src(temp(fpc));
                nvfx_fp_emit(fpc, arith(0, ADD, src[i].reg, NVFX_FP_MASK_ALL, tgsi_src(fpc, fsrc), addend, none));
            } else if (ai == -1 || ai == fsrc->Register.Index) {
                ai = fsrc->Register.Index;
                src[i] = tgsi_src(fpc, fsrc);
            } else {
                src[i] = nvfx_src(temp(fpc));
                nvfx_fp_emit(fpc, arith(0, MOV, src[i].reg, NVFX_FP_MASK_ALL, tgsi_src(fpc, fsrc), none, none));
            }
            break;
        case TGSI_FILE_CONSTANT:
            if ((ci == -1 && ii == -1) ||
                ci == fsrc->Register.Index) {
                ci = fsrc->Register.Index;
                src[i] = tgsi_src(fpc, fsrc);
            } else {
                src[i] = nvfx_src(temp(fpc));
                nvfx_fp_emit(fpc, arith(0, MOV, src[i].reg, NVFX_FP_MASK_ALL, tgsi_src(fpc, fsrc), none, none));
            }
            break;
        case TGSI_FILE_IMMEDIATE:
            if ((ci == -1 && ii == -1) ||
                ii == fsrc->Register.Index) {
                ii = fsrc->Register.Index;
                src[i] = tgsi_src(fpc, fsrc);
            } else {
                src[i] = nvfx_src(temp(fpc));
                nvfx_fp_emit(fpc, arith(0, MOV, src[i].reg, NVFX_FP_MASK_ALL, tgsi_src(fpc, fsrc), none, none));
            }
            break;
        case TGSI_FILE_TEMPORARY:
            /* handled above */
            break;
        case TGSI_FILE_SAMPLER:
            unit = fsrc->Register.Index;
            break;
        case TGSI_FILE_OUTPUT:
            break;
        default:
            NOUVEAU_ERR("bad src file\n");
            return FALSE;
        }
    }

    dst = tgsi_dst(fpc, &finst->Dst[0]);
    mask = tgsi_mask(finst->Dst[0].Register.WriteMask);
    sat = (finst->Instruction.Saturate == TGSI_SAT_ZERO_ONE);
    switch (finst->Instruction.Opcode) {
    case TGSI_OPCODE_ABS:
        nvfx_fp_emit(fpc, arith(sat, MOV, dst, mask, abs(src[0]), none, none));
        break;
    case TGSI_OPCODE_ADD:
        nvfx_fp_emit(fpc, arith(sat, ADD, dst, mask, src[0], src[1], none));
        break;
    case TGSI_OPCODE_CMP:
        insn = arith(0, MOV, none.reg, mask, src[0], none, none);
        insn.cc_update = 1;
        nvfx_fp_emit(fpc, insn);

        insn = arith(sat, MOV, dst, mask, src[2], none, none);
        insn.cc_test = NVFX_COND_GE;
        nvfx_fp_emit(fpc, insn);

        insn = arith(sat, MOV, dst, mask, src[1], none, none);
        insn.cc_test = NVFX_COND_LT;
        nvfx_fp_emit(fpc, insn);
        break;
    case TGSI_OPCODE_COS:
        nvfx_fp_emit(fpc, arith(sat, COS, dst, mask, src[0], none, none));
        break;
    case TGSI_OPCODE_DDX:
        if (mask & (NVFX_FP_MASK_Z | NVFX_FP_MASK_W)) {
            tmp = nvfx_src(temp(fpc));
            nvfx_fp_emit(fpc, arith(sat, DDX, tmp.reg, NVFX_FP_MASK_X | NVFX_FP_MASK_Y, swz(src[0], Z, W, Z, W), none, none));
            nvfx_fp_emit(fpc, arith(0, MOV, tmp.reg, NVFX_FP_MASK_Z | NVFX_FP_MASK_W, swz(tmp, X, Y, X, Y), none, none));
            nvfx_fp_emit(fpc, arith(sat, DDX, tmp.reg, NVFX_FP_MASK_X | NVFX_FP_MASK_Y, src[0], none, none));
            nvfx_fp_emit(fpc, arith(0, MOV, dst, mask, tmp, none, none));
        } else {
            nvfx_fp_emit(fpc, arith(sat, DDX, dst, mask, src[0], none, none));
        }
        break;
    case TGSI_OPCODE_DDY:
        if (mask & (NVFX_FP_MASK_Z | NVFX_FP_MASK_W)) {
            tmp = nvfx_src(temp(fpc));
            nvfx_fp_emit(fpc, arith(sat, DDY, tmp.reg, NVFX_FP_MASK_X | NVFX_FP_MASK_Y, swz(src[0], Z, W, Z, W), none, none));
            nvfx_fp_emit(fpc, arith(0, MOV, tmp.reg, NVFX_FP_MASK_Z | NVFX_FP_MASK_W, swz(tmp, X, Y, X, Y), none, none));
            nvfx_fp_emit(fpc, arith(sat, DDY, tmp.reg, NVFX_FP_MASK_X | NVFX_FP_MASK_Y, src[0], none, none));
            nvfx_fp_emit(fpc, arith(0, MOV, dst, mask, tmp, none, none));
        } else {
            nvfx_fp_emit(fpc, arith(sat, DDY, dst, mask, src[0], none, none));
        }
        break;
    case TGSI_OPCODE_DP2:
        tmp = nvfx_src(temp(fpc));
        nvfx_fp_emit(fpc, arith(0, MUL, tmp.reg, NVFX_FP_MASK_X | NVFX_FP_MASK_Y, src[0], src[1], none));
        nvfx_fp_emit(fpc, arith(0, ADD, dst, mask, swz(tmp, X, X, X, X), swz(tmp, Y, Y, Y, Y), none));
        break;
    case TGSI_OPCODE_DP3:
        nvfx_fp_emit(fpc, arith(sat, DP3, dst, mask, src[0], src[1], none));
        break;
    case TGSI_OPCODE_DP4:
        nvfx_fp_emit(fpc, arith(sat, DP4, dst, mask, src[0], src[1], none));
        break;
    case TGSI_OPCODE_DPH:
        tmp = nvfx_src(temp(fpc));
        nvfx_fp_emit(fpc, arith(0, DP3, tmp.reg, NVFX_FP_MASK_X, src[0], src[1], none));
        nvfx_fp_emit(fpc, arith(sat, ADD, dst, mask, swz(tmp, X, X, X, X), swz(src[1], W, W, W, W), none));
        break;
    case TGSI_OPCODE_DST:
        nvfx_fp_emit(fpc, arith(sat, DST, dst, mask, src[0], src[1], none));
        break;
    case TGSI_OPCODE_EX2:
        nvfx_fp_emit(fpc, arith(sat, EX2, dst, mask, src[0], none, none));
        break;
    case TGSI_OPCODE_FLR:
        nvfx_fp_emit(fpc, arith(sat, FLR, dst, mask, src[0], none, none));
        break;
    case TGSI_OPCODE_FRC:
        nvfx_fp_emit(fpc, arith(sat, FRC, dst, mask, src[0], none, none));
        break;
    case TGSI_OPCODE_KILP:
        nvfx_fp_emit(fpc, arith(0, KIL, none.reg, 0, none, none, none));
        break;
    case TGSI_OPCODE_KIL:
        insn = arith(0, MOV, none.reg, NVFX_FP_MASK_ALL, src[0], none, none);
        insn.cc_update = 1;
        nvfx_fp_emit(fpc, insn);

        insn = arith(0, KIL, none.reg, 0, none, none, none);
        insn.cc_test = NVFX_COND_LT;
        nvfx_fp_emit(fpc, insn);
        break;
    case TGSI_OPCODE_LG2:
        nvfx_fp_emit(fpc, arith(sat, LG2, dst, mask, src[0], none, none));
        break;
    case TGSI_OPCODE_LIT:
        if(!nvfx->is_nv4x)
            nvfx_fp_emit(fpc, arith(sat, LIT_NV30, dst, mask, src[0], src[1], src[2]));
        else {
            /* we use FLT_MIN, so that log2 never gives -infinity, and thus multiplication by
             * specular 0 always gives 0, so that ex2 gives 1, to satisfy the 0^0 = 1 requirement
             *
             * NOTE: if we start using half precision, we might need an fp16 FLT_MIN here instead
             */
            struct nvfx_src maxs = nvfx_src(nvfx_fp_imm(fpc, 0, FLT_MIN, 0, 0));
            tmp = nvfx_src(temp(fpc));
            if (ci >= 0 || ii >= 0) {
                nvfx_fp_emit(fpc, arith(0, MOV, tmp.reg, NVFX_FP_MASK_X | NVFX_FP_MASK_Y, maxs, none, none));
                maxs = tmp;
            }
            nvfx_fp_emit(fpc, arith(0, MAX, tmp.reg, NVFX_FP_MASK_Y | NVFX_FP_MASK_W, swz(src[0], X, X, X, Y), swz(maxs, X, X, Y, Y), none));
            nvfx_fp_emit(fpc, arith(0, LG2, tmp.reg, NVFX_FP_MASK_W, swz(tmp, W, W, W, W), none, none));
            nvfx_fp_emit(fpc, arith(0, MUL, tmp.reg, NVFX_FP_MASK_W, swz(tmp, W, W, W, W), swz(src[0], W, W, W, W), none));
            nvfx_fp_emit(fpc, arith(sat, LITEX2_NV40, dst, mask, swz(tmp, Y, Y, W, W), none, none));
        }
        break;
    case TGSI_OPCODE_LRP:
        if(!nvfx->is_nv4x)
            nvfx_fp_emit(fpc, arith(sat, LRP_NV30, dst, mask, src[0], src[1], src[2]));
        else {
            tmp = nvfx_src(temp(fpc));
            nvfx_fp_emit(fpc, arith(0, MAD, tmp.reg, mask, neg(src[0]), src[2], src[2]));
            nvfx_fp_emit(fpc, arith(sat, MAD, dst, mask, src[0], src[1], tmp));
        }
        break;
    case TGSI_OPCODE_MAD:
        nvfx_fp_emit(fpc, arith(sat, MAD, dst, mask, src[0], src[1], src[2]));
        break;
    case TGSI_OPCODE_MAX:
        nvfx_fp_emit(fpc, arith(sat, MAX, dst, mask, src[0], src[1], none));
        break;
    case TGSI_OPCODE_MIN:
        nvfx_fp_emit(fpc, arith(sat, MIN, dst, mask, src[0], src[1], none));
        break;
    case TGSI_OPCODE_MOV:
        nvfx_fp_emit(fpc, arith(sat, MOV, dst, mask, src[0], none, none));
        break;
    case TGSI_OPCODE_MUL:
        nvfx_fp_emit(fpc, arith(sat, MUL, dst, mask, src[0], src[1], none));
        break;
    case TGSI_OPCODE_NOP:
        break;
    case TGSI_OPCODE_POW:
        if(!nvfx->is_nv4x)
            nvfx_fp_emit(fpc, arith(sat, POW_NV30, dst, mask, src[0], src[1], none));
        else {
            tmp = nvfx_src(temp(fpc));
            nvfx_fp_emit(fpc, arith(0, LG2, tmp.reg, NVFX_FP_MASK_X, swz(src[0], X, X, X, X), none, none));
            nvfx_fp_emit(fpc, arith(0, MUL, tmp.reg, NVFX_FP_MASK_X, swz(tmp, X, X, X, X), swz(src[1], X, X, X, X), none));
            nvfx_fp_emit(fpc, arith(sat, EX2, dst, mask, swz(tmp, X, X, X, X), none, none));
        }
        break;
    case TGSI_OPCODE_RCP:
        nvfx_fp_emit(fpc, arith(sat, RCP, dst, mask, src[0], none, none));
        break;
    case TGSI_OPCODE_RFL:
        if(!nvfx->is_nv4x)
            nvfx_fp_emit(fpc, arith(0, RFL_NV30, dst, mask, src[0], src[1], none));
        else {
            tmp = nvfx_src(temp(fpc));
            nvfx_fp_emit(fpc, arith(0, DP3, tmp.reg, NVFX_FP_MASK_X, src[0], src[0], none));
            nvfx_fp_emit(fpc, arith(0, DP3, tmp.reg, NVFX_FP_MASK_Y, src[0], src[1], none));
            insn = arith(0, DIV, tmp.reg, NVFX_FP_MASK_Z, swz(tmp, Y, Y, Y, Y), swz(tmp, X, X, X, X), none);
            insn.scale = NVFX_FP_OP_DST_SCALE_2X;
            nvfx_fp_emit(fpc, insn);
            nvfx_fp_emit(fpc, arith(sat, MAD, dst, mask, swz(tmp, Z, Z, Z, Z), src[0], neg(src[1])));
        }
        break;
    case TGSI_OPCODE_RSQ:
        if(!nvfx->is_nv4x)
            nvfx_fp_emit(fpc, arith(sat, RSQ_NV30, dst, mask, abs(swz(src[0], X, X, X, X)), none, none));
        else {
            tmp = nvfx_src(temp(fpc));
            insn = arith(0, LG2, tmp.reg, NVFX_FP_MASK_X, abs(swz(src[0], X, X, X, X)), none, none);
            insn.scale = NVFX_FP_OP_DST_SCALE_INV_2X;
            nvfx_fp_emit(fpc, insn);
            nvfx_fp_emit(fpc, arith(sat, EX2, dst, mask, neg(swz(tmp, X, X, X, X)), none, none));
        }
        break;
    case TGSI_OPCODE_SCS:
        /* avoid overwriting the source */
        if(src[0].swz[NVFX_SWZ_X] != NVFX_SWZ_X)
        {
            if (mask & NVFX_FP_MASK_X)
                nvfx_fp_emit(fpc, arith(sat, COS, dst, NVFX_FP_MASK_X, swz(src[0], X, X, X, X), none, none));
            if (mask & NVFX_FP_MASK_Y)
                nvfx_fp_emit(fpc, arith(sat, SIN, dst, NVFX_FP_MASK_Y, swz(src[0], X, X, X, X), none, none));
        }
        else
        {
            if (mask & NVFX_FP_MASK_Y)
                nvfx_fp_emit(fpc, arith(sat, SIN, dst, NVFX_FP_MASK_Y, swz(src[0], X, X, X, X), none, none));
            if (mask & NVFX_FP_MASK_X)
                nvfx_fp_emit(fpc, arith(sat, COS, dst, NVFX_FP_MASK_X, swz(src[0], X, X, X, X), none, none));
        }
        break;
    case TGSI_OPCODE_SEQ:
        nvfx_fp_emit(fpc, arith(sat, SEQ, dst, mask, src[0], src[1], none));
        break;
    case TGSI_OPCODE_SFL:
        nvfx_fp_emit(fpc, arith(sat, SFL, dst, mask, src[0], src[1], none));
        break;
    case TGSI_OPCODE_SGE:
        nvfx_fp_emit(fpc, arith(sat, SGE, dst, mask, src[0], src[1], none));
        break;
    case TGSI_OPCODE_SGT:
        nvfx_fp_emit(fpc, arith(sat, SGT, dst, mask, src[0], src[1], none));
        break;
    case TGSI_OPCODE_SIN:
        nvfx_fp_emit(fpc, arith(sat, SIN, dst, mask, src[0], none, none));
        break;
    case TGSI_OPCODE_SLE:
        nvfx_fp_emit(fpc, arith(sat, SLE, dst, mask, src[0], src[1], none));
        break;
    case TGSI_OPCODE_SLT:
        nvfx_fp_emit(fpc, arith(sat, SLT, dst, mask, src[0], src[1], none));
        break;
    case TGSI_OPCODE_SNE:
        nvfx_fp_emit(fpc, arith(sat, SNE, dst, mask, src[0], src[1], none));
        break;
    case TGSI_OPCODE_SSG:
    {
        struct nvfx_src minones = swz(nvfx_src(nvfx_fp_imm(fpc, -1, -1, -1, -1)), X, X, X, X);

        insn = arith(sat, MOV, dst, mask, src[0], none, none);
        insn.cc_update = 1;
        nvfx_fp_emit(fpc, insn);

        insn = arith(0, STR, dst, mask, none, none, none);
        insn.cc_test = NVFX_COND_GT;
        nvfx_fp_emit(fpc, insn);

        if(!sat) {
            insn = arith(0, MOV, dst, mask, minones, none, none);
            insn.cc_test = NVFX_COND_LT;
            nvfx_fp_emit(fpc, insn);
        }
        break;
    }
    case TGSI_OPCODE_STR:
        nvfx_fp_emit(fpc, arith(sat, STR, dst, mask, src[0], src[1], none));
        break;
    case TGSI_OPCODE_SUB:
        nvfx_fp_emit(fpc, arith(sat, ADD, dst, mask, src[0], neg(src[1]), none));
        break;
    case TGSI_OPCODE_TEX:
        nvfx_fp_emit(fpc, tex(sat, TEX, unit, dst, mask, src[0], none, none));
        break;
    case TGSI_OPCODE_TRUNC:
        tmp = nvfx_src(temp(fpc));
        insn = arith(0, MOV, none.reg, mask, src[0], none, none);
        insn.cc_update = 1;
        nvfx_fp_emit(fpc, insn);

        nvfx_fp_emit(fpc, arith(0, FLR, tmp.reg, mask, abs(src[0]), none, none));
        nvfx_fp_emit(fpc, arith(sat, MOV, dst, mask, tmp, none, none));

        insn = arith(sat, MOV, dst, mask, neg(tmp), none, none);
        insn.cc_test = NVFX_COND_LT;
        nvfx_fp_emit(fpc, insn);
        break;
    case TGSI_OPCODE_TXB:
        nvfx_fp_emit(fpc, tex(sat, TXB, unit, dst, mask, src[0], none, none));
        break;
    case TGSI_OPCODE_TXL:
        if(nvfx->is_nv4x)
            nvfx_fp_emit(fpc, tex(sat, TXL_NV40, unit, dst, mask, src[0], none, none));
        else /* unsupported on nv30, use TEX and hope they like it */
            nvfx_fp_emit(fpc, tex(sat, TEX, unit, dst, mask, src[0], none, none));
        break;
    case TGSI_OPCODE_TXP:
        nvfx_fp_emit(fpc, tex(sat, TXP, unit, dst, mask, src[0], none, none));
        break;
    case TGSI_OPCODE_XPD:
        tmp = nvfx_src(temp(fpc));
        nvfx_fp_emit(fpc, arith(0, MUL, tmp.reg, mask, swz(src[0], Z, X, Y, Y), swz(src[1], Y, Z, X, X), none));
        nvfx_fp_emit(fpc, arith(sat, MAD, dst, (mask & ~NVFX_FP_MASK_W), swz(src[0], Y, Z, X, X), swz(src[1], Z, X, Y, Y), neg(tmp)));
        break;
    case TGSI_OPCODE_IF:
        // MOVRC0 R31 (TR0.xyzw), R<src>:
        // IF (NE.xxxx) ELSE <else> END <end>
        if(!nvfx->use_nv4x)
            goto nv3x_cflow;
        nv40_fp_if(fpc, src[0]);
        break;

    case TGSI_OPCODE_ELSE:
    {
        uint32_t *hw;
        if(!nvfx->use_nv4x)
            goto nv3x_cflow;
        assert(util_dynarray_contains(&fpc->if_stack, unsigned));
        hw = &fpc->fp->insn[util_dynarray_top(&fpc->if_stack, unsigned)];
        hw[2] = NV40_FP_OP_OPCODE_IS_BRANCH | fpc->fp->insn_len;
        break;
    }

    case TGSI_OPCODE_ENDIF:
    {
        uint32_t *hw;
        if(!nvfx->use_nv4x)
            goto nv3x_cflow;
        assert(util_dynarray_contains(&fpc->if_stack, unsigned));
        hw = &fpc->fp->insn[util_dynarray_pop(&fpc->if_stack, unsigned)];
        if(!hw[2])
            hw[2] = NV40_FP_OP_OPCODE_IS_BRANCH | fpc->fp->insn_len;
        hw[3] = fpc->fp->insn_len;
        break;
    }

    case TGSI_OPCODE_BRA:
        /* This can in limited cases be implemented with an IF with the else and endif labels pointing to the target */
        /* no state tracker uses this, so don't implement this for now */
        assert(0);
        nv40_fp_bra(fpc, finst->Label.Label);
        break;

    case TGSI_OPCODE_BGNSUB:
    case TGSI_OPCODE_ENDSUB:
        /* nothing to do here */
        break;

    case TGSI_OPCODE_CAL:
        if(!nvfx->use_nv4x)
            goto nv3x_cflow;
        nv40_fp_cal(fpc, finst->Label.Label);
        break;

    case TGSI_OPCODE_RET:
        if(!nvfx->use_nv4x)
            goto nv3x_cflow;
        nv40_fp_ret(fpc);
        break;

    case TGSI_OPCODE_BGNLOOP:
        if(!nvfx->use_nv4x)
            goto nv3x_cflow;
        /* TODO: we should support using two nested REPs to allow a > 255 iteration count */
        nv40_fp_rep(fpc, 255, finst->Label.Label);
        break;

    case TGSI_OPCODE_ENDLOOP:
        break;

    case TGSI_OPCODE_BRK:
        if(!nvfx->use_nv4x)
            goto nv3x_cflow;
        nv40_fp_brk(fpc);
        break;

    case TGSI_OPCODE_CONT:
    {
        static int warned = 0;
        if(!warned) {
            NOUVEAU_ERR("Sorry, the continue keyword is not implemented: ignoring it.\n");
            warned = 1;
        }
        break;
    }

    default:
        NOUVEAU_ERR("invalid opcode %d\n", finst->Instruction.Opcode);
        return FALSE;
    }

out:
    release_temps(fpc);
    return TRUE;
nv3x_cflow:
    {
        static int warned = 0;
        if(!warned)
        {
            warned = 1;
            NOUVEAU_ERR(
                    "Sorry, control flow instructions are not supported in hardware on nv3x: ignoring them\n"
                    "If rendering is incorrect, try to disable GLSL support in the application.\n");
        }
    }
    goto out;
}
static boolean
nvfx_fragprog_parse_decl_output(struct nvfx_context* nvfx, struct nvfx_fpc *fpc,
                const struct tgsi_full_declaration *fdec)
{
    unsigned idx = fdec->Range.First;
    unsigned hw;

    switch (fdec->Semantic.Name) {
    case TGSI_SEMANTIC_POSITION:
        hw = 1;
        break;
    case TGSI_SEMANTIC_COLOR:
        hw = ~0;
        switch (fdec->Semantic.Index) {
        case 0: hw = 0; break;
        case 1: hw = 2; break;
        case 2: hw = 3; break;
        case 3: hw = 4; break;
        }
        if(hw > ((nvfx->use_nv4x) ? 4 : 2)) {
            NOUVEAU_ERR("bad rcol index\n");
            return FALSE;
        }
        break;
    default:
        NOUVEAU_ERR("bad output semantic\n");
        return FALSE;
    }

    fpc->r_result[idx] = nvfx_reg(NVFXSR_OUTPUT, hw);
    fpc->r_temps |= (1ULL << hw);
    return TRUE;
}
static boolean
nvfx_fragprog_prepare(struct nvfx_context* nvfx, struct nvfx_fpc *fpc)
{
    struct tgsi_parse_context p;
    int high_temp = -1, i;
    struct util_semantic_set set;
    unsigned num_texcoords = nvfx->use_nv4x ? 10 : 8;

    fpc->fp->num_slots = util_semantic_set_from_program_file(&set, fpc->pfp->pipe.tokens, TGSI_FILE_INPUT);
    if(fpc->fp->num_slots > num_texcoords)
        return FALSE;
    util_semantic_layout_from_set(fpc->fp->slot_to_generic, &set, 0, num_texcoords);
    util_semantic_table_from_layout(fpc->generic_to_slot, fpc->fp->slot_to_generic, 0, num_texcoords);

    memset(fpc->fp->slot_to_fp_input, 0xff, sizeof(fpc->fp->slot_to_fp_input));

    fpc->r_imm = CALLOC(fpc->pfp->info.immediate_count, sizeof(struct nvfx_reg));

    tgsi_parse_init(&p, fpc->pfp->pipe.tokens);
    while (!tgsi_parse_end_of_tokens(&p)) {
        const union tgsi_full_token *tok = &p.FullToken;

        tgsi_parse_token(&p);
        switch(tok->Token.Type) {
        case TGSI_TOKEN_TYPE_DECLARATION:
        {
            const struct tgsi_full_declaration *fdec;
            fdec = &p.FullToken.FullDeclaration;
            switch (fdec->Declaration.File) {
            case TGSI_FILE_OUTPUT:
                if (!nvfx_fragprog_parse_decl_output(nvfx, fpc, fdec))
                    goto out_err;
                break;
            case TGSI_FILE_TEMPORARY:
                if (fdec->Range.Last > high_temp) {
                    high_temp = fdec->Range.Last;
                }
                break;
            default:
                break;
            }
        }
            break;
        case TGSI_TOKEN_TYPE_IMMEDIATE:
        {
            struct tgsi_full_immediate *imm;

            imm = &p.FullToken.FullImmediate;
            assert(imm->Immediate.DataType == TGSI_IMM_FLOAT32);
            assert(fpc->nr_imm < fpc->pfp->info.immediate_count);

            fpc->r_imm[fpc->nr_imm++] = nvfx_fp_imm(fpc, imm->u[0].Float, imm->u[1].Float, imm->u[2].Float, imm->u[3].Float);
            break;
        }
        default:
            break;
        }
    }
    tgsi_parse_free(&p);

    if (++high_temp) {
        fpc->r_temp = CALLOC(high_temp, sizeof(struct nvfx_reg));
        for (i = 0; i < high_temp; i++)
            fpc->r_temp[i] = temp(fpc);
        fpc->r_temps_discard = 0ULL;
    }

    return TRUE;

out_err:
    if (fpc->r_temp)
        FREE(fpc->r_temp);
    tgsi_parse_free(&p);
    return FALSE;
}
DEBUG_GET_ONCE_BOOL_OPTION(nvfx_dump_fp, "NVFX_DUMP_FP", FALSE)
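
/* Translation entry point: runs nvfx_fragprog_prepare, converts each TGSI
 * instruction, resolves branch label relocations against the recorded
 * per-instruction offsets, terminates the program, and picks how many copies
 * of it are packed per buffer object (see the queue comment before
 * nvfx_fragprog_validate).
 */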
static struct nvfx_fragment_program*
nvfx_fragprog_translate(struct nvfx_context *nvfx,
            struct nvfx_pipe_fragment_program *pfp,
            boolean emulate_sprite_flipping)
{
    struct tgsi_parse_context parse;
    struct nvfx_fpc *fpc = NULL;
    struct util_dynarray insns;
    struct nvfx_fragment_program* fp = NULL;
    const int min_size = 4096;

    fp = CALLOC_STRUCT(nvfx_fragment_program);
    if (!fp)
        goto out_err;

    fpc = CALLOC_STRUCT(nvfx_fpc);
    if (!fpc)
        goto out_err;

    fpc->max_temps = nvfx->use_nv4x ? 48 : 32;
    fpc->pfp = pfp;
    fpc->fp = fp;
    fpc->num_regs = 2;

    for (unsigned i = 0; i < pfp->info.num_properties; ++i) {
        if (pfp->info.properties[i].name == TGSI_PROPERTY_FS_COORD_ORIGIN) {
            if(pfp->info.properties[i].data[0])
                fp->coord_conventions |= NV30_3D_COORD_CONVENTIONS_ORIGIN_INVERTED;
        } else if (pfp->info.properties[i].name == TGSI_PROPERTY_FS_COORD_PIXEL_CENTER) {
            if(pfp->info.properties[i].data[0])
                fp->coord_conventions |= NV30_3D_COORD_CONVENTIONS_CENTER_INTEGER;
        }
    }
    if (!nvfx_fragprog_prepare(nvfx, fpc))
        goto out_err;

    tgsi_parse_init(&parse, pfp->pipe.tokens);
    util_dynarray_init(&insns);

    if(emulate_sprite_flipping)
    {
        struct nvfx_reg reg = temp(fpc);
        struct nvfx_src sprite_input = nvfx_src(nvfx_reg(NVFXSR_RELOCATED, fp->num_slots));
        struct nvfx_src imm = nvfx_src(nvfx_fp_imm(fpc, 1, -1, 0, 0));

        fpc->sprite_coord_temp = reg.index;
        fpc->r_temps_discard = 0ULL;
        nvfx_fp_emit(fpc, arith(0, MAD, reg, NVFX_FP_MASK_ALL, sprite_input, swz(imm, X, Y, X, X), swz(imm, Z, X, Z, Z)));
    }
    while (!tgsi_parse_end_of_tokens(&parse)) {
        tgsi_parse_token(&parse);

        switch (parse.FullToken.Token.Type) {
        case TGSI_TOKEN_TYPE_INSTRUCTION:
        {
            const struct tgsi_full_instruction *finst;

            util_dynarray_append(&insns, unsigned, fp->insn_len);
            finst = &parse.FullToken.FullInstruction;
            if (!nvfx_fragprog_parse_instruction(nvfx, fpc, finst))
                goto out_err;
        }
            break;
        default:
            break;
        }
    }
    util_dynarray_append(&insns, unsigned, fp->insn_len);

    for(unsigned i = 0; i < fpc->label_relocs.size; i += sizeof(struct nvfx_relocation))
    {
        struct nvfx_relocation* label_reloc = (struct nvfx_relocation*)((char*)fpc->label_relocs.data + i);
        fp->insn[label_reloc->location] |= ((unsigned*)insns.data)[label_reloc->target];
    }
    util_dynarray_fini(&insns);

    if(!nvfx->is_nv4x)
        fp->fp_control |= (fpc->num_regs-1)/2;
    else
        fp->fp_control |= fpc->num_regs << NV40_3D_FP_CONTROL_TEMP_COUNT__SHIFT;

    /* Terminate final instruction */
    if(fp->insn)
        fp->insn[fpc->inst_offset] |= 0x00000001;

    /* Append NOP + END instruction for branches to the end of the program */
    fpc->inst_offset = fp->insn_len;
    grow_insns(fpc, 4);
    fp->insn[fpc->inst_offset + 0] = 0x00000001;
    fp->insn[fpc->inst_offset + 1] = 0x00000000;
    fp->insn[fpc->inst_offset + 2] = 0x00000000;
    fp->insn[fpc->inst_offset + 3] = 0x00000000;

    if(debug_get_option_nvfx_dump_fp())
    {
        debug_printf("\n");
        tgsi_dump(pfp->pipe.tokens, 0);

        debug_printf("\n%s fragment program:\n", nvfx->is_nv4x ? "nv4x" : "nv3x");
        for (unsigned i = 0; i < fp->insn_len; i += 4)
            debug_printf("%3u: %08x %08x %08x %08x\n", i >> 2, fp->insn[i], fp->insn[i + 1], fp->insn[i + 2], fp->insn[i + 3]);
        debug_printf("\n");
    }

    fp->prog_size = (fp->insn_len * 4 + 63) & ~63;

    if(fp->prog_size >= min_size)
        fp->progs_per_bo = 1;
    else
        fp->progs_per_bo = min_size / fp->prog_size;
    fp->bo_prog_idx = fp->progs_per_bo - 1;

out:
    tgsi_parse_free(&parse);
    if(fpc)
    {
        if(fpc->r_temp)
            FREE(fpc->r_temp);
        util_dynarray_fini(&fpc->if_stack);
        util_dynarray_fini(&fpc->label_relocs);
        util_dynarray_fini(&fpc->imm_data);
        //util_dynarray_fini(&fpc->loop_stack);
        FREE(fpc);
    }
    return fp;

out_err:
    _debug_printf("Error: failed to compile this fragment program:\n");
    tgsi_dump(pfp->pipe.tokens, 0);

    if(fp)
    {
        FREE(fp);
        fp = NULL;
    }
    goto out;
}
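
/* Copy instruction dwords into the mapped buffer object; on big-endian hosts
 * each 32-bit word is halfword-swapped, which is presumably the layout the
 * hardware expects when fetching the program over the bus.
 */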
static inline void
nvfx_fp_memcpy(void* dst, const void* src, size_t len)
{
#ifndef PIPE_ARCH_BIG_ENDIAN
    memcpy(dst, src, len);
#else
    size_t i;
    for(i = 0; i < len; i += 4) {
        uint32_t v = *(uint32_t*)((char*)src + i);
        *(uint32_t*)((char*)dst + i) = (v >> 16) | (v << 16);
    }
#endif
}
/* The hardware only supports immediate constants inside the fragment program,
 * and at least on nv30 doesn't support an indirect linkage table.
 *
 * Hence, we need to patch the fragment program itself both to update constants
 * and update linkage.
 *
 * Using a single fragment program would entail unacceptable stalls if the GPU is
 * already rendering with that fragment program.
 * Thus, we instead use a "rotating queue" of buffer objects, each of which is
 * packed with multiple versions of the same program.
 *
 * Whenever we need to patch something, we move to the next program and
 * patch it. If all buffer objects are in use by the GPU, we allocate another one,
 * expanding the queue.
 *
 * As an additional optimization, we record when all the programs have the
 * current input slot configuration, and at that point we stop patching inputs.
 * This happens, for instance, if a given fragment program is always used with
 * the same vertex program (i.e. always with GLSL), or if the layouts match
 * enough (non-GLSL).
 *
 * Note that instead of using multiple programs, we could push commands
 * on the FIFO to patch a single program: it's not fully clear which option is
 * faster, but my guess is that the current way is faster.
 *
 * We also track the previous slot assignments for each version and don't
 * patch if they are the same (this could perhaps be removed).
 */
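
/* Illustration of the queue: fpbo objects form a circular list, and bo_prog_idx
 * walks the programs packed in the current object, e.g. with progs_per_bo == 2:
 *
 *   fp->fpbo -> [prog0|prog1] -> [prog2|prog3] -> ... -> back to fp->fpbo
 *
 * Advancing past the last program in a BO either rotates to the next BO (if the
 * GPU is done with it) or inserts a freshly allocated one into the ring.
 */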
void
nvfx_fragprog_validate(struct nvfx_context *nvfx)
{
    struct nouveau_channel* chan = nvfx->screen->base.channel;
    struct nouveau_grobj *eng3d = nvfx->screen->eng3d;
    struct nvfx_pipe_fragment_program *pfp = nvfx->fragprog;
    struct nvfx_vertex_program* vp;

    // TODO: the multiplication by point_quad_rasterization is probably superfluous
    unsigned sprite_coord_enable = nvfx->rasterizer->pipe.point_quad_rasterization * nvfx->rasterizer->pipe.sprite_coord_enable;

    boolean emulate_sprite_flipping = sprite_coord_enable && nvfx->rasterizer->pipe.sprite_coord_mode;
    unsigned key = emulate_sprite_flipping;
    struct nvfx_fragment_program* fp;
= nvfx_fragprog_translate(nvfx
, pfp
, emulate_sprite_flipping
);
1256 struct ureg_program
*ureg
= ureg_create( TGSI_PROCESSOR_FRAGMENT
);
1260 nvfx
->dummy_fs
= ureg_create_shader_and_destroy( ureg
, &nvfx
->pipe
);
1265 _debug_printf("Error: unable to create a dummy fragment shader: aborting.");
1270 fp
= nvfx_fragprog_translate(nvfx
, nvfx
->dummy_fs
, FALSE
);
1271 emulate_sprite_flipping
= FALSE
;
1275 _debug_printf("Error: unable to compile even a dummy fragment shader: aborting.");
1283 vp
= nvfx
->hw_vertprog
;
1285 if (fp
->last_vp_id
!= vp
->id
|| fp
->last_sprite_coord_enable
!= sprite_coord_enable
) {
1286 int sprite_real_input
= -1;
1287 int sprite_reloc_input
;
1289 fp
->last_vp_id
= vp
->id
;
1290 fp
->last_sprite_coord_enable
= sprite_coord_enable
;
1292 if(sprite_coord_enable
)
1294 sprite_real_input
= vp
->sprite_fp_input
;
1295 if(sprite_real_input
< 0)
1297 unsigned used_texcoords
= 0;
1298 for(unsigned i
= 0; i
< fp
->num_slots
; ++i
) {
1299 unsigned generic
= fp
->slot_to_generic
[i
];
1300 if((generic
< 32) && !((1 << generic
) & sprite_coord_enable
))
1302 unsigned char slot_mask
= vp
->generic_to_fp_input
[generic
];
1303 if(slot_mask
>= 0xf0)
1304 used_texcoords
|= 1 << ((slot_mask
& 0xf) - NVFX_FP_OP_INPUT_SRC_TC0
);
1308 sprite_real_input
= NVFX_FP_OP_INPUT_SRC_TC(__builtin_ctz(~used_texcoords
));
1311 fp
->point_sprite_control
|= (1 << (sprite_real_input
- NVFX_FP_OP_INPUT_SRC_TC0
+ 8));
1314 fp
->point_sprite_control
= 0;
1316 if(emulate_sprite_flipping
)
1317 sprite_reloc_input
= 0;
1319 sprite_reloc_input
= sprite_real_input
;
1321 for(i
= 0; i
< fp
->num_slots
; ++i
) {
1322 unsigned generic
= fp
->slot_to_generic
[i
];
1323 if((generic
< 32) && ((1 << generic
) & sprite_coord_enable
))
1325 if(fp
->slot_to_fp_input
[i
] != sprite_reloc_input
)
1330 unsigned char slot_mask
= vp
->generic_to_fp_input
[generic
];
1331 if((slot_mask
>> 4) & (slot_mask
^ fp
->slot_to_fp_input
[i
]))
1336 if(emulate_sprite_flipping
)
1338 if(fp
->slot_to_fp_input
[fp
->num_slots
] != sprite_real_input
)
1345 /* optimization: we start updating from the slot we found the first difference in */
1346 for(; i
< fp
->num_slots
; ++i
)
1348 unsigned generic
= fp
->slot_to_generic
[i
];
1349 if((generic
< 32) && ((1 << generic
) & sprite_coord_enable
))
1350 fp
->slot_to_fp_input
[i
] = sprite_reloc_input
;
1352 fp
->slot_to_fp_input
[i
] = vp
->generic_to_fp_input
[generic
] & 0xf;
1355 fp
->slot_to_fp_input
[fp
->num_slots
] = sprite_real_input
;
1360 for(i
= 0; i
<= fp
->num_slots
; ++i
) {
1361 unsigned fp_input
= fp
->slot_to_fp_input
[i
];
1362 if(fp_input
== NVFX_FP_OP_INPUT_SRC_TC(8))
1363 fp
->or |= (1 << 12);
1364 else if(fp_input
== NVFX_FP_OP_INPUT_SRC_TC(9))
1365 fp
->or |= (1 << 13);
1366 else if(fp_input
>= NVFX_FP_OP_INPUT_SRC_TC(0) && fp_input
<= NVFX_FP_OP_INPUT_SRC_TC(7))
1367 fp
->or |= (1 << (fp_input
- NVFX_FP_OP_INPUT_SRC_TC0
+ 14));
1371 fp
->progs_left_with_obsolete_slot_assignments
= fp
->progs
;
    /* We must update constants even on "just" fragprog changes, because
     * we don't check whether the current constant buffer matches the latest
     * one bound to this fragment program.
     * Doing such a check would likely be a pessimization.
     */
    if ((nvfx->hw_fragprog != fp) || (nvfx->dirty & (NVFX_NEW_FRAGPROG | NVFX_NEW_FRAGCONST))) {
        int offset;
        uint32_t* fpmap;

        ++fp->bo_prog_idx;
        if(fp->bo_prog_idx >= fp->progs_per_bo)
        {
            if(fp->fpbo && !nouveau_bo_busy(fp->fpbo->next->bo, NOUVEAU_BO_WR))
            {
                fp->fpbo = fp->fpbo->next;
            }
            else
            {
                struct nvfx_fragment_program_bo* fpbo = os_malloc_aligned(sizeof(struct nvfx_fragment_program) + (fp->prog_size + 8) * fp->progs_per_bo, 16);
                uint8_t* map;
                uint8_t* buf;

                fpbo->slots = (unsigned char*)&fpbo->insn[(fp->prog_size) * fp->progs_per_bo];
                memset(fpbo->slots, 0, 8 * fp->progs_per_bo);
                if(fp->fpbo)
                {
                    fpbo->next = fp->fpbo->next;
                    fp->fpbo->next = fpbo;
                }
                else
                    fpbo->next = fpbo;
                fp->fpbo = fpbo;
                fp->progs += fp->progs_per_bo;
                fp->progs_left_with_obsolete_slot_assignments += fp->progs_per_bo;
                nouveau_bo_new(nvfx->screen->base.device, NOUVEAU_BO_VRAM | NOUVEAU_BO_MAP, 64, fp->prog_size * fp->progs_per_bo, &fpbo->bo);
                nouveau_bo_map(fpbo->bo, NOUVEAU_BO_NOSYNC);

                map = fpbo->bo->map;
                buf = (uint8_t*)fpbo->insn;
                for(unsigned i = 0; i < fp->progs_per_bo; ++i)
                {
                    memcpy(buf, fp->insn, fp->insn_len * 4);
                    nvfx_fp_memcpy(map, fp->insn, fp->insn_len * 4);
                    map += fp->prog_size;
                    buf += fp->prog_size;
                }
            }
            fp->bo_prog_idx = 0;
        }

        offset = fp->bo_prog_idx * fp->prog_size;
        fpmap = (uint32_t*)((char*)fp->fpbo->bo->map + offset);

        if(nvfx->constbuf[PIPE_SHADER_FRAGMENT]) {
            struct pipe_resource* constbuf = nvfx->constbuf[PIPE_SHADER_FRAGMENT];
            uint32_t* map = (uint32_t*)nvfx_buffer(constbuf)->data;
            uint32_t* fpmap = (uint32_t*)((char*)fp->fpbo->bo->map + offset);
            uint32_t* buf = (uint32_t*)((char*)fp->fpbo->insn + offset);
            int i;
            for (i = 0; i < fp->nr_consts; ++i) {
                unsigned off = fp->consts[i].offset;
                unsigned idx = fp->consts[i].index * 4;

                /* TODO: is checking a good idea? */
                if(memcmp(&buf[off], &map[idx], 4 * sizeof(uint32_t))) {
                    memcpy(&buf[off], &map[idx], 4 * sizeof(uint32_t));
                    nvfx_fp_memcpy(&fpmap[off], &map[idx], 4 * sizeof(uint32_t));
                }
            }
        }
1450 * current slot assignments, otherwise we just update constants for speed
1452 if(fp
->progs_left_with_obsolete_slot_assignments
) {
1453 unsigned char* fpbo_slots
= &fp
->fpbo
->slots
[fp
->bo_prog_idx
* 8];
1454 /* also relocate sprite coord slot, if any */
1455 for(unsigned i
= 0; i
<= fp
->num_slots
; ++i
) {
1456 unsigned value
= fp
->slot_to_fp_input
[i
];;
1457 if(value
!= fpbo_slots
[i
]) {
1459 unsigned* begin
= (unsigned*)fp
->slot_relocations
[i
].data
;
1460 unsigned* end
= (unsigned*)((char*)fp
->slot_relocations
[i
].data
+ fp
->slot_relocations
[i
].size
);
1461 //printf("fp %p reloc slot %u/%u: %u -> %u\n", fp, i, fp->num_slots, fpbo_slots[i], value);
1464 /* was relocated to an input, switch type to temporary */
1465 for(p
= begin
; p
!= end
; ++p
) {
1467 unsigned dw
= fp
->insn
[off
];
1468 dw
&=~ NVFX_FP_REG_TYPE_MASK
;
1469 //printf("reloc_tmp at %x\n", off);
1470 nvfx_fp_memcpy(&fpmap
[off
], &dw
, sizeof(dw
));
1475 /* was relocated to a temporary, switch type to input */
1476 for(p
= begin
; p
!= end
; ++p
) {
1478 unsigned dw
= fp
->insn
[off
];
1479 //printf("reloc_in at %x\n", off);
1480 dw
|= NVFX_FP_REG_TYPE_INPUT
<< NVFX_FP_REG_TYPE_SHIFT
;
1481 nvfx_fp_memcpy(&fpmap
[off
], &dw
, sizeof(dw
));
1485 /* set the correct input index */
1486 for(p
= begin
; p
!= end
; ++p
) {
1487 unsigned off
= *p
& ~3;
1488 unsigned dw
= fp
->insn
[off
];
1489 //printf("reloc&~3 at %x\n", off);
1490 dw
= (dw
& ~NVFX_FP_OP_INPUT_SRC_MASK
) | (value
<< NVFX_FP_OP_INPUT_SRC_SHIFT
);
1491 nvfx_fp_memcpy(&fpmap
[off
], &dw
, sizeof(dw
));
1494 fpbo_slots
[i
] = value
;
1497 --fp
->progs_left_with_obsolete_slot_assignments
;
1500 nvfx
->hw_fragprog
= fp
;
1502 MARK_RING(chan
, 8, 1);
1503 BEGIN_RING(chan
, eng3d
, NV30_3D_FP_ACTIVE_PROGRAM
, 1);
1504 OUT_RELOC(chan
, fp
->fpbo
->bo
, offset
, NOUVEAU_BO_VRAM
|
1505 NOUVEAU_BO_GART
| NOUVEAU_BO_RD
| NOUVEAU_BO_LOW
|
1506 NOUVEAU_BO_OR
, NV30_3D_FP_ACTIVE_PROGRAM_DMA0
,
1507 NV30_3D_FP_ACTIVE_PROGRAM_DMA1
);
1508 BEGIN_RING(chan
, eng3d
, NV30_3D_FP_CONTROL
, 1);
1509 OUT_RING(chan
, fp
->fp_control
);
1510 if(!nvfx
->is_nv4x
) {
1511 BEGIN_RING(chan
, eng3d
, NV30_3D_FP_REG_CONTROL
, 1);
1512 OUT_RING(chan
, (1<<16)|0x4);
1513 BEGIN_RING(chan
, eng3d
, NV30_3D_TEX_UNITS_ENABLE
, 1);
1514 OUT_RING(chan
, fp
->samplers
);
1519 unsigned pointsprite_control
= fp
->point_sprite_control
| nvfx
->rasterizer
->pipe
.point_quad_rasterization
;
1520 if(pointsprite_control
!= nvfx
->hw_pointsprite_control
)
1522 BEGIN_RING(chan
, eng3d
, NV30_3D_POINT_SPRITE
, 1);
1523 OUT_RING(chan
, pointsprite_control
);
1524 nvfx
->hw_pointsprite_control
= pointsprite_control
;
1528 nvfx
->relocs_needed
&=~ NVFX_RELOCATE_FRAGPROG
;
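
/* Re-emit the buffer object relocation for the currently bound program, e.g.
 * after a channel flush has invalidated previously emitted relocations.
 */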
void
nvfx_fragprog_relocate(struct nvfx_context *nvfx)
{
    struct nouveau_channel* chan = nvfx->screen->base.channel;
    struct nvfx_fragment_program *fp = nvfx->hw_fragprog;
    struct nouveau_bo* bo = fp->fpbo->bo;
    int offset = fp->bo_prog_idx * fp->prog_size;
    unsigned fp_flags = NOUVEAU_BO_VRAM | NOUVEAU_BO_RD; // TODO: GART?
    fp_flags |= NOUVEAU_BO_DUMMY;
    MARK_RING(chan, 2, 2);
    OUT_RELOC(chan, bo, RING_3D(NV30_3D_FP_ACTIVE_PROGRAM, 1), fp_flags, 0, 0);
    OUT_RELOC(chan, bo, offset, fp_flags | NOUVEAU_BO_LOW |
              NOUVEAU_BO_OR, NV30_3D_FP_ACTIVE_PROGRAM_DMA0,
              NV30_3D_FP_ACTIVE_PROGRAM_DMA1);
    nvfx->relocs_needed &=~ NVFX_RELOCATE_FRAGPROG;
}
*nvfx
,
1550 struct nvfx_fragment_program
*fp
)
1553 struct nvfx_fragment_program_bo
* fpbo
= fp
->fpbo
;
1558 struct nvfx_fragment_program_bo
* next
= fpbo
->next
;
1559 nouveau_bo_unmap(fpbo
->bo
);
1560 nouveau_bo_ref(0, &fpbo
->bo
);
1561 os_free_aligned(fpbo
);
1564 while(fpbo
!= fp
->fpbo
);
1567 for(i
= 0; i
< Elements(fp
->slot_relocations
); ++i
)
1568 util_dynarray_fini(&fp
->slot_relocations
[i
]);
static void *
nvfx_fp_state_create(struct pipe_context *pipe,
                     const struct pipe_shader_state *cso)
{
    struct nvfx_pipe_fragment_program *pfp;

    pfp = CALLOC(1, sizeof(struct nvfx_pipe_fragment_program));
    pfp->pipe.tokens = tgsi_dup_tokens(cso->tokens);

    tgsi_scan_shader(pfp->pipe.tokens, &pfp->info);

    return (void *)pfp;
}
*pipe
, void *hwcso
)
1591 struct nvfx_context
*nvfx
= nvfx_context(pipe
);
1593 nvfx
->fragprog
= hwcso
;
1594 nvfx
->dirty
|= NVFX_NEW_FRAGPROG
;
static void
nvfx_fp_state_delete(struct pipe_context *pipe, void *hwcso)
{
    struct nvfx_context *nvfx = nvfx_context(pipe);
    struct nvfx_pipe_fragment_program *pfp = hwcso;
    unsigned i;

    for(i = 0; i < Elements(pfp->fps); ++i)
    {
        if(pfp->fps[i])
        {
            nvfx_fragprog_destroy(nvfx, pfp->fps[i]);
            FREE(pfp->fps[i]);
        }
    }

    FREE((void*)pfp->pipe.tokens);
    FREE(pfp);
}
void
nvfx_init_fragprog_functions(struct nvfx_context *nvfx)
{
    nvfx->pipe.create_fs_state = nvfx_fp_state_create;
    nvfx->pipe.bind_fs_state = nvfx_fp_state_bind;
    nvfx->pipe.delete_fs_state = nvfx_fp_state_delete;
}