1 #include "pipe/p_context.h"
2 #include "pipe/p_state.h"
3 #include "util/u_inlines.h"
4 #include "util/u_format.h"
6 #include "nouveau/nouveau_util.h"
7 #include "nv50_context.h"
struct push_context {
   struct nv50_context *nv50;

   unsigned vtx_size; /* size of one vertex in 32-bit words */

   void *idxbuf;
   unsigned idxsize;

   float edgeflag;
   int edgeflag_attr;

   struct {
      void *map;
      unsigned stride;
      unsigned divisor;
      unsigned step;
      void (*push)(struct nouveau_channel *, void *);
   } attr[16];
   unsigned attr_nr;
};

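/* Helpers that copy raw attribute data into the command stream: emit_bXX_N
 * reads N values of XX bits each and packs them into 32-bit VERTEX_DATA
 * words. Narrower formats reuse wider helpers when the byte layout matches
 * (e.g. two 8-bit components are pushed with emit_b16_1).
 */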
static void
emit_b32_1(struct nouveau_channel *chan, void *data)
{
   uint32_t *v = data;
   OUT_RING(chan, v[0]);
}

static void
emit_b32_2(struct nouveau_channel *chan, void *data)
{
   uint32_t *v = data;
   OUT_RING(chan, v[0]);
   OUT_RING(chan, v[1]);
}

static void
emit_b32_3(struct nouveau_channel *chan, void *data)
{
   uint32_t *v = data;
   OUT_RING(chan, v[0]);
   OUT_RING(chan, v[1]);
   OUT_RING(chan, v[2]);
}

static void
emit_b32_4(struct nouveau_channel *chan, void *data)
{
   uint32_t *v = data;
   OUT_RING(chan, v[0]);
   OUT_RING(chan, v[1]);
   OUT_RING(chan, v[2]);
   OUT_RING(chan, v[3]);
}

static void
emit_b16_1(struct nouveau_channel *chan, void *data)
{
   uint16_t *v = data;
   OUT_RING(chan, v[0]);
}

static void
emit_b16_3(struct nouveau_channel *chan, void *data)
{
   uint16_t *v = data;
   OUT_RING(chan, (v[1] << 16) | v[0]);
   OUT_RING(chan, v[2]);
}

static void
emit_b08_1(struct nouveau_channel *chan, void *data)
{
   uint8_t *v = data;
   OUT_RING(chan, v[0]);
}

static void
emit_b08_3(struct nouveau_channel *chan, void *data)
{
   uint8_t *v = data;
   OUT_RING(chan, (v[2] << 16) | (v[1] << 8) | v[0]);
}

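/* Push a single vertex (all FIFO-pushed attributes) through the command
 * stream, toggling the hardware edgeflag state first if this vertex carries
 * an edgeflag attribute that differs from the last value programmed.
 */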
static INLINE void
emit_vertex(struct push_context *ctx, unsigned n)
{
   struct nouveau_grobj *tesla = ctx->nv50->screen->tesla;
   struct nouveau_channel *chan = tesla->channel;
   unsigned i;

   if (ctx->edgeflag_attr < 16) {
      float *edgeflag = ctx->attr[ctx->edgeflag_attr].map +
                        ctx->attr[ctx->edgeflag_attr].stride * n;

      if (*edgeflag != ctx->edgeflag) {
         BEGIN_RING(chan, tesla, NV50TCL_EDGEFLAG_ENABLE, 1);
         OUT_RING (chan, *edgeflag ? 1 : 0);
         ctx->edgeflag = *edgeflag;
      }
   }

   BEGIN_RING_NI(chan, tesla, NV50TCL_VERTEX_DATA, ctx->vtx_size);
   for (i = 0; i < ctx->attr_nr; i++)
      ctx->attr[i].push(chan, ctx->attr[i].map + ctx->attr[i].stride * n);
}

static void
emit_edgeflag(void *priv, boolean enabled)
{
   struct push_context *ctx = priv;
   struct nouveau_grobj *tesla = ctx->nv50->screen->tesla;
   struct nouveau_channel *chan = tesla->channel;

   BEGIN_RING(chan, tesla, NV50TCL_EDGEFLAG_ENABLE, 1);
   OUT_RING (chan, enabled ? 1 : 0);
}

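/* u_split_prim emit callbacks: fetch indices of the given width from the
 * mapped index buffer and push the referenced vertices; emit_verts is the
 * non-indexed variant that pushes a consecutive vertex range.
 */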
static void
emit_elt08(void *priv, unsigned start, unsigned count)
{
   struct push_context *ctx = priv;
   uint8_t *idxbuf = ctx->idxbuf;

   while (count--)
      emit_vertex(ctx, idxbuf[start++]);
}

static void
emit_elt16(void *priv, unsigned start, unsigned count)
{
   struct push_context *ctx = priv;
   uint16_t *idxbuf = ctx->idxbuf;

   while (count--)
      emit_vertex(ctx, idxbuf[start++]);
}

static void
emit_elt32(void *priv, unsigned start, unsigned count)
{
   struct push_context *ctx = priv;
   uint32_t *idxbuf = ctx->idxbuf;

   while (count--)
      emit_vertex(ctx, idxbuf[start++]);
}

static void
emit_verts(void *priv, unsigned start, unsigned count)
{
   while (count--)
      emit_vertex(priv, start++);
}

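/* Draw by pushing vertex data inline through the FIFO (NV50TCL_VERTEX_DATA)
 * instead of having the GPU fetch from vertex buffers, splitting primitives
 * so that each chunk fits into the available ring space and iterating over
 * instances in software.
 */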
void
nv50_push_elements_instanced(struct pipe_context *pipe,
                             struct pipe_buffer *idxbuf, unsigned idxsize,
                             unsigned mode, unsigned start, unsigned count,
                             unsigned i_start, unsigned i_count)
{
   struct nv50_context *nv50 = nv50_context(pipe);
   struct nouveau_grobj *tesla = nv50->screen->tesla;
   struct nouveau_channel *chan = tesla->channel;
   struct push_context ctx;
   const unsigned p_overhead = 4 + /* begin/end */
                               4;  /* potential edgeflag enable/disable */
   const unsigned v_overhead = 1 + /* VERTEX_DATA packet header */
                               2;  /* potential edgeflag modification */
   struct u_split_prim s;
   unsigned vtx_size, i;
   boolean nzi = FALSE;

   ctx.nv50 = nv50;
   ctx.attr_nr = 0;
   ctx.idxbuf = NULL;
   ctx.vtx_size = 0;
   ctx.edgeflag = 0.5f; /* neither 0 nor 1, so the first edgeflag gets emitted */
   ctx.edgeflag_attr = nv50->vertprog->cfg.edgeflag_in;

   /* map vertex buffers, determine vertex size */
   for (i = 0; i < nv50->vtxelt->num_elements; i++) {
      struct pipe_vertex_element *ve = &nv50->vtxelt->pipe[i];
      struct pipe_vertex_buffer *vb = &nv50->vtxbuf[ve->vertex_buffer_index];
      struct nouveau_bo *bo = nouveau_bo(vb->buffer);
      unsigned size, nr_components, n;

      if (!(nv50->vbo_fifo & (1 << i)))
         continue;
      n = ctx.attr_nr++;

      if (nouveau_bo_map(bo, NOUVEAU_BO_RD)) {
         assert(bo->map);
         return;
      }
      ctx.attr[n].map = bo->map + vb->buffer_offset + ve->src_offset;
      nouveau_bo_unmap(bo);

      ctx.attr[n].stride = vb->stride;
      ctx.attr[n].divisor = ve->instance_divisor;
      if (ctx.attr[n].divisor) {
         ctx.attr[n].step = i_start % ve->instance_divisor;
         ctx.attr[n].map += i_start * vb->stride;
      }

      size = util_format_get_component_bits(ve->src_format,
                                            UTIL_FORMAT_COLORSPACE_RGB, 0);
      nr_components = util_format_get_nr_components(ve->src_format);

      /* pick a push helper and accumulate the vertex size in 32-bit words */
      switch (size) {
      case 8:
         switch (nr_components) {
         case 1: ctx.attr[n].push = emit_b08_1; break;
         case 2: ctx.attr[n].push = emit_b16_1; break;
         case 3: ctx.attr[n].push = emit_b08_3; break;
         case 4: ctx.attr[n].push = emit_b32_1; break;
         }
         ctx.vtx_size++;
         break;
      case 16:
         switch (nr_components) {
         case 1: ctx.attr[n].push = emit_b16_1; break;
         case 2: ctx.attr[n].push = emit_b32_1; break;
         case 3: ctx.attr[n].push = emit_b16_3; break;
         case 4: ctx.attr[n].push = emit_b32_2; break;
         }
         ctx.vtx_size += (nr_components + 1) >> 1;
         break;
      case 32:
         switch (nr_components) {
         case 1: ctx.attr[n].push = emit_b32_1; break;
         case 2: ctx.attr[n].push = emit_b32_2; break;
         case 3: ctx.attr[n].push = emit_b32_3; break;
         case 4: ctx.attr[n].push = emit_b32_4; break;
         }
         ctx.vtx_size += nr_components;
         break;
      default:
         assert(0);
         return;
      }
   }
   vtx_size = ctx.vtx_size + v_overhead;

   /* map index buffer, if present */
   if (idxbuf) {
      struct nouveau_bo *bo = nouveau_bo(idxbuf);

      if (nouveau_bo_map(bo, NOUVEAU_BO_RD)) {
         assert(bo->map);
         return;
      }
      ctx.idxbuf = bo->map;
      ctx.idxsize = idxsize;
      nouveau_bo_unmap(bo);
   }

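   /* hook up the split-prim callbacks: edgeflag toggling plus a vertex
    * emitter matching the index size (or the non-indexed path)
    */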
   s.priv = &ctx;
   s.edge = emit_edgeflag;
   if (idxbuf) {
      if (idxsize == 1)
         s.emit = emit_elt08;
      else
         s.emit = (idxsize == 2) ? emit_elt16 : emit_elt32;
   } else
      s.emit = emit_verts;

   /* per-instance loop */
   BEGIN_RING(chan, tesla, NV50TCL_CB_ADDR, 2);
   OUT_RING (chan, NV50_CB_AUX | (24 << 8));
   OUT_RING (chan, i_start);

   while (i_count--) {
      unsigned max_verts;
      boolean done;

      /* advance instanced attributes to the next instance's data */
      for (i = 0; i < ctx.attr_nr; i++) {
         if (!ctx.attr[i].divisor ||
              ctx.attr[i].divisor != ++ctx.attr[i].step)
            continue;
         ctx.attr[i].step = 0;
         ctx.attr[i].map += ctx.attr[i].stride;
      }

      u_split_prim_init(&s, mode, start, count);
      do {
         if (AVAIL_RING(chan) < p_overhead + (6 * vtx_size)) {
            FIRE_RING(chan);
            if (!nv50_state_validate(nv50, p_overhead + (6 * vtx_size))) {
               assert(0);
               return;
            }
         }

         max_verts = AVAIL_RING(chan);
         max_verts -= p_overhead;
         max_verts /= vtx_size;

         BEGIN_RING(chan, tesla, NV50TCL_VERTEX_BEGIN, 1);
         OUT_RING (chan, nv50_prim(s.mode) | (nzi ? (1 << 28) : 0));
         done = u_split_prim_next(&s, max_verts);
         BEGIN_RING(chan, tesla, NV50TCL_VERTEX_END, 1);