/**************************************************************************
 *
 * Copyright 2011 Marek Olšák <maraeo@gmail.com>
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL THE AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include "util/u_vbuf_mgr.h"

#include "util/u_format.h"
#include "util/u_inlines.h"
#include "util/u_memory.h"
#include "util/u_upload_mgr.h"
#include "translate/translate.h"
#include "translate/translate_cache.h"
/* Hardware vertex fetcher limitations can be described by this structure. */
struct u_vbuf_caps {
   /* Vertex format CAPs. */
   /* TRUE if hardware supports it. */
   unsigned format_fixed32:1;    /* PIPE_FORMAT_*32*_FIXED */
   unsigned format_float16:1;    /* PIPE_FORMAT_*16*_FLOAT */
   unsigned format_float64:1;    /* PIPE_FORMAT_*64*_FLOAT */
   unsigned format_norm32:1;     /* PIPE_FORMAT_*32*NORM */
   unsigned format_scaled32:1;   /* PIPE_FORMAT_*32*SCALED */

   /* Whether vertex fetches don't have to be dword-aligned. */
   /* TRUE if hardware supports it. */
   unsigned fetch_dword_unaligned:1;
};
struct u_vbuf_mgr_elements {
   unsigned count;
   struct pipe_vertex_element ve[PIPE_MAX_ATTRIBS];

   unsigned src_format_size[PIPE_MAX_ATTRIBS];

   /* If (velem[i].src_format != native_format[i]), the vertex buffer
    * referenced by the vertex element cannot be used for rendering and
    * its vertex data must be translated to native_format[i]. */
   enum pipe_format native_format[PIPE_MAX_ATTRIBS];
   unsigned native_format_size[PIPE_MAX_ATTRIBS];

   /* This might mean two things:
    * - src_format != native_format, as discussed above.
    * - src_offset % 4 != 0 (if the caps don't allow such an offset). */
   boolean incompatible_layout;
};
struct u_vbuf_mgr_priv {
   struct u_vbuf_mgr b;
   struct u_vbuf_caps caps;
   struct pipe_context *pipe;

   struct translate_cache *translate_cache;
   unsigned translate_vb_slot;

   struct u_vbuf_mgr_elements *ve;
   void *saved_ve, *fallback_ve;
   boolean ve_binding_lock;

   unsigned saved_buffer_offset[PIPE_MAX_ATTRIBS];

   boolean any_user_vbs;
   boolean incompatible_vb_layout;
};
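
/* How the manager is driven (see the functions below): the driver hands
 * its vertex buffers to u_vbuf_mgr_set_vertex_buffers, calls
 * u_vbuf_mgr_draw_begin before each draw (which may translate
 * incompatible layouts and upload user buffers into real_vertex_buffer),
 * and calls u_vbuf_mgr_draw_end afterwards to restore the saved state. */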
static void u_vbuf_mgr_init_format_caps(struct u_vbuf_mgr_priv *mgr)
{
   struct pipe_screen *screen = mgr->pipe->screen;

   mgr->caps.format_fixed32 =
      screen->is_format_supported(screen, PIPE_FORMAT_R32_FIXED, PIPE_BUFFER,
                                  0, PIPE_BIND_VERTEX_BUFFER);

   mgr->caps.format_float16 =
      screen->is_format_supported(screen, PIPE_FORMAT_R16_FLOAT, PIPE_BUFFER,
                                  0, PIPE_BIND_VERTEX_BUFFER);

   mgr->caps.format_float64 =
      screen->is_format_supported(screen, PIPE_FORMAT_R64_FLOAT, PIPE_BUFFER,
                                  0, PIPE_BIND_VERTEX_BUFFER);

   mgr->caps.format_norm32 =
      screen->is_format_supported(screen, PIPE_FORMAT_R32_UNORM, PIPE_BUFFER,
                                  0, PIPE_BIND_VERTEX_BUFFER) &&
      screen->is_format_supported(screen, PIPE_FORMAT_R32_SNORM, PIPE_BUFFER,
                                  0, PIPE_BIND_VERTEX_BUFFER);

   mgr->caps.format_scaled32 =
      screen->is_format_supported(screen, PIPE_FORMAT_R32_USCALED, PIPE_BUFFER,
                                  0, PIPE_BIND_VERTEX_BUFFER) &&
      screen->is_format_supported(screen, PIPE_FORMAT_R32_SSCALED, PIPE_BUFFER,
                                  0, PIPE_BIND_VERTEX_BUFFER);
}
struct u_vbuf_mgr *
u_vbuf_mgr_create(struct pipe_context *pipe,
                  unsigned upload_buffer_size,
                  unsigned upload_buffer_alignment,
                  unsigned upload_buffer_bind,
                  enum u_fetch_alignment fetch_alignment)
{
   struct u_vbuf_mgr_priv *mgr = CALLOC_STRUCT(u_vbuf_mgr_priv);

   mgr->pipe = pipe;
   mgr->translate_cache = translate_cache_create();

   mgr->b.uploader = u_upload_create(pipe, upload_buffer_size,
                                     upload_buffer_alignment,
                                     upload_buffer_bind);

   mgr->caps.fetch_dword_unaligned =
         fetch_alignment == U_VERTEX_FETCH_BYTE_ALIGNED;

   u_vbuf_mgr_init_format_caps(mgr);

   return &mgr->b;
}
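
/* Example usage (hypothetical driver code; the sizes are illustrative and
 * U_VERTEX_FETCH_DWORD_ALIGNED is assumed to be the counterpart of
 * U_VERTEX_FETCH_BYTE_ALIGNED in u_vbuf_mgr.h):
 *
 *    struct u_vbuf_mgr *mgr =
 *       u_vbuf_mgr_create(pipe, 128 * 1024, 16,
 *                         PIPE_BIND_VERTEX_BUFFER,
 *                         U_VERTEX_FETCH_DWORD_ALIGNED);
 */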
void u_vbuf_mgr_destroy(struct u_vbuf_mgr *mgrb)
{
   struct u_vbuf_mgr_priv *mgr = (struct u_vbuf_mgr_priv*)mgrb;
   unsigned i;

   for (i = 0; i < mgr->b.nr_real_vertex_buffers; i++) {
      pipe_resource_reference(&mgr->b.vertex_buffer[i].buffer, NULL);
      pipe_resource_reference(&mgr->b.real_vertex_buffer[i], NULL);
   }

   translate_cache_destroy(mgr->translate_cache);
   u_upload_destroy(mgr->b.uploader);
   FREE(mgr);
}
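
/* Fallback path for vertex layouts/formats the hardware cannot fetch:
 * read the offending attributes with the translate module, write them
 * into a freshly uploaded buffer with a supported layout, bind that
 * buffer in a free slot, and switch to a fallback vertex elements state
 * which points at it. */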
static enum u_vbuf_return_flags
u_vbuf_translate_begin(struct u_vbuf_mgr_priv *mgr,
                       int min_index, int max_index)
{
   struct translate_key key;
   struct translate_element *te;
   unsigned tr_elem_index[PIPE_MAX_ATTRIBS];
   struct translate *tr;
   boolean vb_translated[PIPE_MAX_ATTRIBS] = {0};
   uint8_t *vb_map[PIPE_MAX_ATTRIBS] = {0}, *out_map;
   struct pipe_transfer *vb_transfer[PIPE_MAX_ATTRIBS] = {0};
   struct pipe_resource *out_buffer = NULL;
   unsigned i, num_verts, out_offset;
   struct pipe_vertex_element new_velems[PIPE_MAX_ATTRIBS];
   boolean upload_flushed = FALSE;

   memset(&key, 0, sizeof(key));
   memset(tr_elem_index, 0xff, sizeof(tr_elem_index));

   /* Initialize the translate key, i.e. the recipe how vertices should be
    * translated. */
   for (i = 0; i < mgr->ve->count; i++) {
      struct pipe_vertex_buffer *vb =
            &mgr->b.vertex_buffer[mgr->ve->ve[i].vertex_buffer_index];
      enum pipe_format output_format = mgr->ve->native_format[i];
      unsigned output_format_size = mgr->ve->native_format_size[i];

      /* Check for support. */
      if (mgr->ve->ve[i].src_format == mgr->ve->native_format[i] &&
          (mgr->caps.fetch_dword_unaligned ||
           (vb->buffer_offset % 4 == 0 &&
            vb->stride % 4 == 0 &&
            mgr->ve->ve[i].src_offset % 4 == 0))) {
         continue;
      }

      /* Workaround for translate: output floats instead of halfs. */
      switch (output_format) {
      case PIPE_FORMAT_R16_FLOAT:
         output_format = PIPE_FORMAT_R32_FLOAT;
         output_format_size = 4;
         break;
      case PIPE_FORMAT_R16G16_FLOAT:
         output_format = PIPE_FORMAT_R32G32_FLOAT;
         output_format_size = 8;
         break;
      case PIPE_FORMAT_R16G16B16_FLOAT:
         output_format = PIPE_FORMAT_R32G32B32_FLOAT;
         output_format_size = 12;
         break;
      case PIPE_FORMAT_R16G16B16A16_FLOAT:
         output_format = PIPE_FORMAT_R32G32B32A32_FLOAT;
         output_format_size = 16;
         break;
      default:;
      }

      /* Add this vertex element. */
      te = &key.element[key.nr_elements];
      /*te->type;
      te->instance_divisor;*/
      te->input_buffer = mgr->ve->ve[i].vertex_buffer_index;
      te->input_format = mgr->ve->ve[i].src_format;
      te->input_offset = mgr->ve->ve[i].src_offset;
      te->output_format = output_format;
      te->output_offset = key.output_stride;

      key.output_stride += output_format_size;
      vb_translated[mgr->ve->ve[i].vertex_buffer_index] = TRUE;
      tr_elem_index[i] = key.nr_elements;
      key.nr_elements++;
   }

   /* Get a translate object. */
   tr = translate_cache_find(mgr->translate_cache, &key);

   /* Map buffers we want to translate. */
   for (i = 0; i < mgr->b.nr_vertex_buffers; i++) {
      if (vb_translated[i]) {
         struct pipe_vertex_buffer *vb = &mgr->b.vertex_buffer[i];

         vb_map[i] = pipe_buffer_map(mgr->pipe, vb->buffer,
                                     PIPE_TRANSFER_READ, &vb_transfer[i]);

         tr->set_buffer(tr, i,
                        vb_map[i] + vb->buffer_offset + vb->stride * min_index,
                        vb->stride, ~0);
      }
   }

   /* Create and map the output buffer. */
   num_verts = max_index + 1 - min_index;

   u_upload_alloc(mgr->b.uploader,
                  key.output_stride * min_index,
                  key.output_stride * num_verts,
                  &out_offset, &out_buffer, &upload_flushed,
                  (void**)&out_map);

   out_offset -= key.output_stride * min_index;

   /* Translate. */
   tr->run(tr, 0, num_verts, 0, out_map);

   /* Unmap all buffers. */
   for (i = 0; i < mgr->b.nr_vertex_buffers; i++) {
      if (vb_translated[i]) {
         pipe_buffer_unmap(mgr->pipe, vb_transfer[i]);
      }
   }

   /* Setup the new vertex buffer in the first free slot. */
   mgr->translate_vb_slot = ~0;
   for (i = 0; i < PIPE_MAX_ATTRIBS; i++) {
      if (!mgr->b.vertex_buffer[i].buffer) {
         mgr->translate_vb_slot = i;

         if (i >= mgr->b.nr_vertex_buffers) {
            mgr->b.nr_real_vertex_buffers = i+1;
         }
         break;
      }
   }

   if (mgr->translate_vb_slot != ~0) {
      /* Setup the new vertex buffer. */
      pipe_resource_reference(
            &mgr->b.real_vertex_buffer[mgr->translate_vb_slot], out_buffer);
      mgr->b.vertex_buffer[mgr->translate_vb_slot].buffer_offset = out_offset;
      mgr->b.vertex_buffer[mgr->translate_vb_slot].stride = key.output_stride;

      /* Setup new vertex elements. */
      for (i = 0; i < mgr->ve->count; i++) {
         if (tr_elem_index[i] < key.nr_elements) {
            te = &key.element[tr_elem_index[i]];
            new_velems[i].instance_divisor = mgr->ve->ve[i].instance_divisor;
            new_velems[i].src_format = te->output_format;
            new_velems[i].src_offset = te->output_offset;
            new_velems[i].vertex_buffer_index = mgr->translate_vb_slot;
         } else {
            memcpy(&new_velems[i], &mgr->ve->ve[i],
                   sizeof(struct pipe_vertex_element));
         }
      }

      mgr->fallback_ve =
         mgr->pipe->create_vertex_elements_state(mgr->pipe, mgr->ve->count,
                                                 new_velems);

      /* Preserve saved_ve. */
      mgr->ve_binding_lock = TRUE;
      mgr->pipe->bind_vertex_elements_state(mgr->pipe, mgr->fallback_ve);
      mgr->ve_binding_lock = FALSE;
   }

   pipe_resource_reference(&out_buffer, NULL);

   return upload_flushed ? U_VBUF_UPLOAD_FLUSHED : 0;
}
static void u_vbuf_translate_end(struct u_vbuf_mgr_priv *mgr)
{
   if (mgr->fallback_ve == NULL) {
      return;
   }

   /* Restore vertex elements. */
   /* Note that saved_ve will be overwritten in bind_vertex_elements_state. */
   mgr->pipe->bind_vertex_elements_state(mgr->pipe, mgr->saved_ve);
   mgr->pipe->delete_vertex_elements_state(mgr->pipe, mgr->fallback_ve);
   mgr->fallback_ve = NULL;

   /* Delete the now-unused VBO. */
   pipe_resource_reference(&mgr->b.real_vertex_buffer[mgr->translate_vb_slot],
                           NULL);
   mgr->b.nr_real_vertex_buffers = mgr->b.nr_vertex_buffers;
}
#define FORMAT_REPLACE(what, withwhat) \
    case PIPE_FORMAT_##what: format = PIPE_FORMAT_##withwhat; break
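
/* For example, FORMAT_REPLACE(R32_FIXED, R32_FLOAT) expands to:
 *    case PIPE_FORMAT_R32_FIXED: format = PIPE_FORMAT_R32_FLOAT; break
 */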
struct u_vbuf_mgr_elements *
u_vbuf_mgr_create_vertex_elements(struct u_vbuf_mgr *mgrb,
                                  unsigned count,
                                  const struct pipe_vertex_element *attribs,
                                  struct pipe_vertex_element *native_attribs)
{
   struct u_vbuf_mgr_priv *mgr = (struct u_vbuf_mgr_priv*)mgrb;
   unsigned i;
   struct u_vbuf_mgr_elements *ve = CALLOC_STRUCT(u_vbuf_mgr_elements);

   ve->count = count;

   memcpy(ve->ve, attribs, sizeof(struct pipe_vertex_element) * count);
   memcpy(native_attribs, attribs, sizeof(struct pipe_vertex_element) * count);

   /* Set the best native format in case the original format is not
    * supported. */
   for (i = 0; i < count; i++) {
      enum pipe_format format = ve->ve[i].src_format;

      ve->src_format_size[i] = util_format_get_blocksize(format);

      /* Choose a native format.
       * For now we don't care about the alignment, that's going to
       * be sorted out later. */
      if (!mgr->caps.format_fixed32) {
         switch (format) {
            FORMAT_REPLACE(R32_FIXED,          R32_FLOAT);
            FORMAT_REPLACE(R32G32_FIXED,       R32G32_FLOAT);
            FORMAT_REPLACE(R32G32B32_FIXED,    R32G32B32_FLOAT);
            FORMAT_REPLACE(R32G32B32A32_FIXED, R32G32B32A32_FLOAT);
            default:;
         }
      }
      if (!mgr->caps.format_float16) {
         switch (format) {
            FORMAT_REPLACE(R16_FLOAT,          R32_FLOAT);
            FORMAT_REPLACE(R16G16_FLOAT,       R32G32_FLOAT);
            FORMAT_REPLACE(R16G16B16_FLOAT,    R32G32B32_FLOAT);
            FORMAT_REPLACE(R16G16B16A16_FLOAT, R32G32B32A32_FLOAT);
            default:;
         }
      }
      if (!mgr->caps.format_float64) {
         switch (format) {
            FORMAT_REPLACE(R64_FLOAT,          R32_FLOAT);
            FORMAT_REPLACE(R64G64_FLOAT,       R32G32_FLOAT);
            FORMAT_REPLACE(R64G64B64_FLOAT,    R32G32B32_FLOAT);
            FORMAT_REPLACE(R64G64B64A64_FLOAT, R32G32B32A32_FLOAT);
            default:;
         }
      }
      if (!mgr->caps.format_norm32) {
         switch (format) {
            FORMAT_REPLACE(R32_UNORM,          R32_FLOAT);
            FORMAT_REPLACE(R32G32_UNORM,       R32G32_FLOAT);
            FORMAT_REPLACE(R32G32B32_UNORM,    R32G32B32_FLOAT);
            FORMAT_REPLACE(R32G32B32A32_UNORM, R32G32B32A32_FLOAT);
            FORMAT_REPLACE(R32_SNORM,          R32_FLOAT);
            FORMAT_REPLACE(R32G32_SNORM,       R32G32_FLOAT);
            FORMAT_REPLACE(R32G32B32_SNORM,    R32G32B32_FLOAT);
            FORMAT_REPLACE(R32G32B32A32_SNORM, R32G32B32A32_FLOAT);
            default:;
         }
      }
      if (!mgr->caps.format_scaled32) {
         switch (format) {
            FORMAT_REPLACE(R32_USCALED,          R32_FLOAT);
            FORMAT_REPLACE(R32G32_USCALED,       R32G32_FLOAT);
            FORMAT_REPLACE(R32G32B32_USCALED,    R32G32B32_FLOAT);
            FORMAT_REPLACE(R32G32B32A32_USCALED, R32G32B32A32_FLOAT);
            FORMAT_REPLACE(R32_SSCALED,          R32_FLOAT);
            FORMAT_REPLACE(R32G32_SSCALED,       R32G32_FLOAT);
            FORMAT_REPLACE(R32G32B32_SSCALED,    R32G32B32_FLOAT);
            FORMAT_REPLACE(R32G32B32A32_SSCALED, R32G32B32A32_FLOAT);
            default:;
         }
      }

      native_attribs[i].src_format = format;
      ve->native_format[i] = format;
      ve->native_format_size[i] =
            util_format_get_blocksize(ve->native_format[i]);

      ve->incompatible_layout =
            ve->incompatible_layout ||
            ve->ve[i].src_format != ve->native_format[i] ||
            (!mgr->caps.fetch_dword_unaligned && ve->ve[i].src_offset % 4 != 0);
   }

   /* Align the formats to the size of DWORD if needed. */
   if (!mgr->caps.fetch_dword_unaligned) {
      for (i = 0; i < count; i++) {
         ve->native_format_size[i] = align(ve->native_format_size[i], 4);
      }
   }

   return ve;
}
void u_vbuf_mgr_bind_vertex_elements(struct u_vbuf_mgr *mgrb,
                                     void *cso,
                                     struct u_vbuf_mgr_elements *ve)
{
   struct u_vbuf_mgr_priv *mgr = (struct u_vbuf_mgr_priv*)mgrb;

   /* Remember the driver's CSO and our element info, unless this bind
    * comes from the translate fallback itself (see u_vbuf_translate_begin,
    * which sets ve_binding_lock while binding fallback_ve). */
   if (!mgr->ve_binding_lock) {
      mgr->saved_ve = cso;
      mgr->ve = ve;
   }
}
void u_vbuf_mgr_destroy_vertex_elements(struct u_vbuf_mgr *mgr,
                                        struct u_vbuf_mgr_elements *ve)
{
   FREE(ve);
}
void u_vbuf_mgr_set_vertex_buffers(struct u_vbuf_mgr *mgrb,
                                   unsigned count,
                                   const struct pipe_vertex_buffer *bufs)
{
   struct u_vbuf_mgr_priv *mgr = (struct u_vbuf_mgr_priv*)mgrb;
   unsigned i;

   mgr->any_user_vbs = FALSE;
   mgr->incompatible_vb_layout = FALSE;

   if (!mgr->caps.fetch_dword_unaligned) {
      /* Check if the strides and offsets are aligned to the size of DWORD. */
      for (i = 0; i < count; i++) {
         if (bufs[i].buffer) {
            if (bufs[i].stride % 4 != 0 ||
                bufs[i].buffer_offset % 4 != 0) {
               mgr->incompatible_vb_layout = TRUE;
               break;
            }
         }
      }
   }

   for (i = 0; i < count; i++) {
      const struct pipe_vertex_buffer *vb = &bufs[i];

      pipe_resource_reference(&mgr->b.vertex_buffer[i].buffer, vb->buffer);
      pipe_resource_reference(&mgr->b.real_vertex_buffer[i], NULL);
      mgr->saved_buffer_offset[i] = vb->buffer_offset;

      if (!vb->buffer) {
         continue;
      }

      /* User buffers are uploaded later in u_vbuf_mgr_draw_begin. */
      if (u_vbuf_resource(vb->buffer)->user_ptr) {
         mgr->any_user_vbs = TRUE;
         continue;
      }

      pipe_resource_reference(&mgr->b.real_vertex_buffer[i], vb->buffer);
   }

   /* Unreference any buffers left over from a previous, larger set. */
   for (; i < mgr->b.nr_real_vertex_buffers; i++) {
      pipe_resource_reference(&mgr->b.vertex_buffer[i].buffer, NULL);
      pipe_resource_reference(&mgr->b.real_vertex_buffer[i], NULL);
   }

   memcpy(mgr->b.vertex_buffer, bufs,
          sizeof(struct pipe_vertex_buffer) * count);

   mgr->b.nr_vertex_buffers = count;
   mgr->b.nr_real_vertex_buffers = count;
}
static enum u_vbuf_return_flags
u_vbuf_upload_buffers(struct u_vbuf_mgr_priv *mgr,
                      int min_index, int max_index,
                      unsigned instance_count)
{
   unsigned i, nr = mgr->ve->count;
   unsigned count = max_index + 1 - min_index;
   boolean uploaded[PIPE_MAX_ATTRIBS] = {0};
   enum u_vbuf_return_flags retval = 0;

   for (i = 0; i < nr; i++) {
      unsigned index = mgr->ve->ve[i].vertex_buffer_index;
      struct pipe_vertex_buffer *vb = &mgr->b.vertex_buffer[index];

      if (vb->buffer &&
          u_vbuf_resource(vb->buffer)->user_ptr &&
          !uploaded[index]) {
         unsigned first, size;
         boolean flushed;
         unsigned instance_div = mgr->ve->ve[i].instance_divisor;

         if (instance_div) {
            first = 0;
            size = vb->stride *
                   ((instance_count + instance_div - 1) / instance_div);
         } else if (vb->stride) {
            first = vb->stride * min_index;
            size = vb->stride * count;

            /* Unusual case when stride is smaller than the format size.
             * XXX This won't work with interleaved arrays. */
            if (mgr->ve->native_format_size[i] > vb->stride)
               size += mgr->ve->native_format_size[i] - vb->stride;
         } else {
            first = 0;
            size = mgr->ve->native_format_size[i];
         }

         u_upload_data(mgr->b.uploader, first, size,
                       u_vbuf_resource(vb->buffer)->user_ptr + first,
                       &vb->buffer_offset,
                       &mgr->b.real_vertex_buffer[index],
                       &flushed);

         vb->buffer_offset -= first;

         uploaded[index] = TRUE;
         if (flushed)
            retval |= U_VBUF_UPLOAD_FLUSHED;
      } else {
         assert(mgr->b.real_vertex_buffer[index]);
      }
   }

   return retval;
}
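
/* Sizing in u_vbuf_upload_buffers by example:
 * - instanced data with instance_div = 2 and instance_count = 5: the
 *   element is fetched ceil(5/2) = 3 times, so size = vb->stride * 3;
 * - ordinary per-vertex data: the range [min_index, max_index] is
 *   uploaded, i.e. size = vb->stride * (max_index + 1 - min_index);
 * - stride = 0: a single constant element of native_format_size bytes. */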
static void u_vbuf_mgr_compute_max_index(struct u_vbuf_mgr_priv *mgr)
{
   unsigned i, nr = mgr->ve->count;

   mgr->b.max_index = ~0;

   for (i = 0; i < nr; i++) {
      struct pipe_vertex_buffer *vb =
            &mgr->b.vertex_buffer[mgr->ve->ve[i].vertex_buffer_index];
      int unused;
      unsigned max_index;

      if (!vb->buffer ||
          !vb->stride ||
          u_vbuf_resource(vb->buffer)->user_ptr) {
         continue;
      }

      /* How many bytes is unused after the last vertex.
       * width0 may be "count*stride - unused" and we have to compensate
       * for that when dividing by stride. */
      unused = vb->stride -
               (mgr->ve->ve[i].src_offset + mgr->ve->src_format_size[i]);

      /* If src_offset is greater than stride (which means it's a buffer
       * offset rather than a vertex offset)... */
      if (unused < 0) {
         unused = 0;
      }

      /* Compute the maximum index for this vertex element. */
      max_index =
         (vb->buffer->width0 - vb->buffer_offset + (unsigned)unused) /
         vb->stride - 1;

      mgr->b.max_index = MIN2(mgr->b.max_index, max_index);
   }
}
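
/* Worked example for u_vbuf_mgr_compute_max_index: stride = 16,
 * src_offset = 0, src_format_size = 8 gives unused = 8. With
 * buffer_offset = 0 and a tightly sized buffer holding 3 vertices,
 * width0 = 3*16 - 8 = 40, so (40 - 0 + 8) / 16 - 1 = 2, which is the
 * correct maximum index. */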
enum u_vbuf_return_flags
u_vbuf_mgr_draw_begin(struct u_vbuf_mgr *mgrb,
                      const struct pipe_draw_info *info)
{
   struct u_vbuf_mgr_priv *mgr = (struct u_vbuf_mgr_priv*)mgrb;
   int min_index, max_index;
   enum u_vbuf_return_flags retval = 0;

   u_vbuf_mgr_compute_max_index(mgr);

   min_index = info->min_index - info->index_bias;
   if (info->max_index == ~0) {
      max_index = mgr->b.max_index;
   } else {
      max_index = MIN2(info->max_index - info->index_bias, mgr->b.max_index);
   }

   /* Translate vertices with non-native layouts or formats. */
   if (mgr->incompatible_vb_layout || mgr->ve->incompatible_layout) {
      retval |= u_vbuf_translate_begin(mgr, min_index, max_index);

      if (mgr->fallback_ve) {
         retval |= U_VBUF_BUFFERS_UPDATED;
      }
   }

   /* Upload user buffers. */
   if (mgr->any_user_vbs) {
      retval |= u_vbuf_upload_buffers(mgr, min_index, max_index,
                                      info->instance_count);
      retval |= U_VBUF_BUFFERS_UPDATED;
   }

   return retval;
}
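
/* Typical call sequence in a driver's draw function (hypothetical sketch;
 * the re-validation step depends on the driver):
 *
 *    enum u_vbuf_return_flags flags = u_vbuf_mgr_draw_begin(mgr, &info);
 *    if (flags & U_VBUF_BUFFERS_UPDATED) {
 *       // re-emit vertex buffer state from mgr->real_vertex_buffer
 *    }
 *    ...emit the draw...
 *    u_vbuf_mgr_draw_end(mgr);
 */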
void u_vbuf_mgr_draw_end(struct u_vbuf_mgr *mgrb)
{
   struct u_vbuf_mgr_priv *mgr = (struct u_vbuf_mgr_priv*)mgrb;
   unsigned i;

   /* buffer offsets were modified in u_vbuf_upload_buffers */
   if (mgr->any_user_vbs) {
      for (i = 0; i < mgr->b.nr_vertex_buffers; i++)
         mgr->b.vertex_buffer[i].buffer_offset = mgr->saved_buffer_offset[i];
   }

   if (mgr->fallback_ve) {
      u_vbuf_translate_end(mgr);
   }
}