/**************************************************************************
 *
 * Copyright 2011 Marek Olšák <maraeo@gmail.com>
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include "util/u_vbuf_mgr.h"

#include "util/u_format.h"
#include "util/u_inlines.h"
#include "util/u_memory.h"
#include "util/u_upload_mgr.h"
#include "translate/translate.h"
#include "translate/translate_cache.h"
/* Hardware vertex fetcher limitations can be described by this structure. */
struct u_vbuf_caps {
   /* Vertex format CAPs. */
   /* TRUE if hardware supports it. */
   unsigned format_fixed32:1;    /* PIPE_FORMAT_*32*_FIXED */
   unsigned format_float16:1;    /* PIPE_FORMAT_*16*_FLOAT */
   unsigned format_float64:1;    /* PIPE_FORMAT_*64*_FLOAT */
   unsigned format_norm32:1;     /* PIPE_FORMAT_*32*NORM */
   unsigned format_scaled32:1;   /* PIPE_FORMAT_*32*SCALED */

   /* Whether vertex fetches don't have to be dword-aligned. */
   /* TRUE if hardware supports it. */
   unsigned fetch_dword_unaligned:1;
};
struct u_vbuf_mgr_elements {
   unsigned count;
   struct pipe_vertex_element ve[PIPE_MAX_ATTRIBS];

   unsigned src_format_size[PIPE_MAX_ATTRIBS];

   /* If (velem[i].src_format != native_format[i]), the vertex buffer
    * referenced by the vertex element cannot be used for rendering and
    * its vertex data must be translated to native_format[i]. */
   enum pipe_format native_format[PIPE_MAX_ATTRIBS];
   unsigned native_format_size[PIPE_MAX_ATTRIBS];

   /* This might mean two things:
    * - src_format != native_format, as discussed above.
    * - src_offset % 4 != 0 (if the caps don't allow such an offset). */
   boolean incompatible_layout;
};
struct u_vbuf_mgr_priv {
   struct u_vbuf_mgr b;
   struct u_vbuf_caps caps;
   struct pipe_context *pipe;

   struct translate_cache *translate_cache;
   unsigned translate_vb_slot;

   struct u_vbuf_mgr_elements *ve;
   void *saved_ve, *fallback_ve;
   boolean ve_binding_lock;

   boolean any_user_vbs;
   boolean incompatible_vb_layout;
};
static void u_vbuf_mgr_init_format_caps(struct u_vbuf_mgr_priv *mgr)
{
   struct pipe_screen *screen = mgr->pipe->screen;

   mgr->caps.format_fixed32 =
      screen->is_format_supported(screen, PIPE_FORMAT_R32_FIXED, PIPE_BUFFER,
                                  0, PIPE_BIND_VERTEX_BUFFER);

   mgr->caps.format_float16 =
      screen->is_format_supported(screen, PIPE_FORMAT_R16_FLOAT, PIPE_BUFFER,
                                  0, PIPE_BIND_VERTEX_BUFFER);

   mgr->caps.format_float64 =
      screen->is_format_supported(screen, PIPE_FORMAT_R64_FLOAT, PIPE_BUFFER,
                                  0, PIPE_BIND_VERTEX_BUFFER);

   mgr->caps.format_norm32 =
      screen->is_format_supported(screen, PIPE_FORMAT_R32_UNORM, PIPE_BUFFER,
                                  0, PIPE_BIND_VERTEX_BUFFER) &&
      screen->is_format_supported(screen, PIPE_FORMAT_R32_SNORM, PIPE_BUFFER,
                                  0, PIPE_BIND_VERTEX_BUFFER);

   mgr->caps.format_scaled32 =
      screen->is_format_supported(screen, PIPE_FORMAT_R32_USCALED, PIPE_BUFFER,
                                  0, PIPE_BIND_VERTEX_BUFFER) &&
      screen->is_format_supported(screen, PIPE_FORMAT_R32_SSCALED, PIPE_BUFFER,
                                  0, PIPE_BIND_VERTEX_BUFFER);
}
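
/* Create the vertex buffer manager. The upload_buffer_* parameters are
 * forwarded to u_upload_create(); fetch_alignment says whether the hardware
 * can fetch vertices from byte-aligned (unaligned) addresses. */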
struct u_vbuf_mgr *
u_vbuf_mgr_create(struct pipe_context *pipe,
                  unsigned upload_buffer_size,
                  unsigned upload_buffer_alignment,
                  unsigned upload_buffer_bind,
                  enum u_fetch_alignment fetch_alignment)
{
   struct u_vbuf_mgr_priv *mgr = CALLOC_STRUCT(u_vbuf_mgr_priv);

   mgr->pipe = pipe;
   mgr->translate_cache = translate_cache_create();

   mgr->b.uploader = u_upload_create(pipe, upload_buffer_size,
                                     upload_buffer_alignment,
                                     upload_buffer_bind);

   mgr->caps.fetch_dword_unaligned =
      fetch_alignment == U_VERTEX_FETCH_BYTE_ALIGNED;

   u_vbuf_mgr_init_format_caps(mgr);

   return &mgr->b;
}
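
/* Release all buffer references held by the manager and destroy its
 * translate cache and uploader. */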
void u_vbuf_mgr_destroy(struct u_vbuf_mgr *mgrb)
{
   struct u_vbuf_mgr_priv *mgr = (struct u_vbuf_mgr_priv*)mgrb;
   unsigned i;

   for (i = 0; i < mgr->b.nr_real_vertex_buffers; i++) {
      pipe_resource_reference(&mgr->b.vertex_buffer[i].buffer, NULL);
      pipe_resource_reference(&mgr->b.real_vertex_buffer[i], NULL);
   }

   translate_cache_destroy(mgr->translate_cache);
   u_upload_destroy(mgr->b.uploader);
   FREE(mgr);
}
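
/* Translate vertices in the range [min_index, max_index] whose format or
 * layout the hardware cannot fetch into native formats, writing the
 * converted vertices into an uploaded buffer and binding a fallback
 * vertex elements state that points at it. */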
static enum u_vbuf_return_flags
u_vbuf_translate_begin(struct u_vbuf_mgr_priv *mgr,
                       int min_index, int max_index)
{
   struct translate_key key;
   struct translate_element *te;
   unsigned tr_elem_index[PIPE_MAX_ATTRIBS];
   struct translate *tr;
   boolean vb_translated[PIPE_MAX_ATTRIBS] = {0};
   uint8_t *vb_map[PIPE_MAX_ATTRIBS] = {0}, *out_map;
   struct pipe_transfer *vb_transfer[PIPE_MAX_ATTRIBS] = {0};
   struct pipe_resource *out_buffer = NULL;
   unsigned i, num_verts, out_offset;
   struct pipe_vertex_element new_velems[PIPE_MAX_ATTRIBS];
   boolean upload_flushed = FALSE;

   memset(&key, 0, sizeof(key));
   memset(tr_elem_index, 0xff, sizeof(tr_elem_index));

   /* Initialize the translate key, i.e. the recipe how vertices should be
    * translated. */
   for (i = 0; i < mgr->ve->count; i++) {
      struct pipe_vertex_buffer *vb =
            &mgr->b.vertex_buffer[mgr->ve->ve[i].vertex_buffer_index];
      enum pipe_format output_format = mgr->ve->native_format[i];
      unsigned output_format_size = mgr->ve->native_format_size[i];

      /* Check for support. */
      if (mgr->ve->ve[i].src_format == mgr->ve->native_format[i] &&
          (mgr->caps.fetch_dword_unaligned ||
           (vb->buffer_offset % 4 == 0 &&
            vb->stride % 4 == 0 &&
            mgr->ve->ve[i].src_offset % 4 == 0))) {
         continue;
      }

      /* Workaround for translate: output floats instead of halfs. */
      switch (output_format) {
      case PIPE_FORMAT_R16_FLOAT:
         output_format = PIPE_FORMAT_R32_FLOAT;
         output_format_size = 4;
         break;
      case PIPE_FORMAT_R16G16_FLOAT:
         output_format = PIPE_FORMAT_R32G32_FLOAT;
         output_format_size = 8;
         break;
      case PIPE_FORMAT_R16G16B16_FLOAT:
         output_format = PIPE_FORMAT_R32G32B32_FLOAT;
         output_format_size = 12;
         break;
      case PIPE_FORMAT_R16G16B16A16_FLOAT:
         output_format = PIPE_FORMAT_R32G32B32A32_FLOAT;
         output_format_size = 16;
         break;
      default:;
      }

      /* Add this vertex element. */
      te = &key.element[key.nr_elements];
      /*te->type;
        te->instance_divisor;*/
      te->input_buffer = mgr->ve->ve[i].vertex_buffer_index;
      te->input_format = mgr->ve->ve[i].src_format;
      te->input_offset = mgr->ve->ve[i].src_offset;
      te->output_format = output_format;
      te->output_offset = key.output_stride;

      key.output_stride += output_format_size;
      vb_translated[mgr->ve->ve[i].vertex_buffer_index] = TRUE;
      tr_elem_index[i] = key.nr_elements;
      key.nr_elements++;
   }
   /* Get a translate object. */
   tr = translate_cache_find(mgr->translate_cache, &key);
   /* Map buffers we want to translate. */
   for (i = 0; i < mgr->b.nr_vertex_buffers; i++) {
      if (vb_translated[i]) {
         struct pipe_vertex_buffer *vb = &mgr->b.vertex_buffer[i];

         vb_map[i] = pipe_buffer_map(mgr->pipe, vb->buffer,
                                     PIPE_TRANSFER_READ, &vb_transfer[i]);

         tr->set_buffer(tr, i,
                        vb_map[i] + vb->buffer_offset + vb->stride * min_index,
                        vb->stride, ~0);
      }
   }
247 num_verts
= max_index
+ 1 - min_index
;
249 u_upload_alloc(mgr
->b
.uploader
,
250 key
.output_stride
* min_index
,
251 key
.output_stride
* num_verts
,
252 &out_offset
, &out_buffer
, &upload_flushed
,
255 out_offset
-= key
.output_stride
* min_index
;
258 tr
->run(tr
, 0, num_verts
, 0, out_map
);
   /* Unmap all buffers. */
   for (i = 0; i < mgr->b.nr_vertex_buffers; i++) {
      if (vb_translated[i]) {
         pipe_buffer_unmap(mgr->pipe, vb_transfer[i]);
      }
   }
   /* Setup the new vertex buffer in the first free slot. */
   mgr->translate_vb_slot = ~0;
   for (i = 0; i < PIPE_MAX_ATTRIBS; i++) {
      if (!mgr->b.vertex_buffer[i].buffer) {
         mgr->translate_vb_slot = i;

         if (i >= mgr->b.nr_vertex_buffers) {
            mgr->b.nr_real_vertex_buffers = i+1;
         }
         break;
      }
   }
   if (mgr->translate_vb_slot != ~0) {
      /* Setup the new vertex buffer. */
      pipe_resource_reference(
            &mgr->b.real_vertex_buffer[mgr->translate_vb_slot], out_buffer);
      mgr->b.vertex_buffer[mgr->translate_vb_slot].buffer_offset = out_offset;
      mgr->b.vertex_buffer[mgr->translate_vb_slot].stride = key.output_stride;

      /* Setup new vertex elements. */
      for (i = 0; i < mgr->ve->count; i++) {
         if (tr_elem_index[i] < key.nr_elements) {
            te = &key.element[tr_elem_index[i]];
            new_velems[i].instance_divisor = mgr->ve->ve[i].instance_divisor;
            new_velems[i].src_format = te->output_format;
            new_velems[i].src_offset = te->output_offset;
            new_velems[i].vertex_buffer_index = mgr->translate_vb_slot;
         } else {
            memcpy(&new_velems[i], &mgr->ve->ve[i],
                   sizeof(struct pipe_vertex_element));
         }
      }

      mgr->fallback_ve =
         mgr->pipe->create_vertex_elements_state(mgr->pipe, mgr->ve->count,
                                                 new_velems);

      /* Preserve saved_ve. */
      mgr->ve_binding_lock = TRUE;
      mgr->pipe->bind_vertex_elements_state(mgr->pipe, mgr->fallback_ve);
      mgr->ve_binding_lock = FALSE;
   }

   pipe_resource_reference(&out_buffer, NULL);

   return upload_flushed ? U_VBUF_UPLOAD_FLUSHED : 0;
}
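
/* Undo u_vbuf_translate_begin: restore the saved vertex elements state,
 * delete the fallback one and unreference the translated VBO. */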
static void u_vbuf_translate_end(struct u_vbuf_mgr_priv *mgr)
{
   if (mgr->fallback_ve == NULL) {
      return;
   }

   /* Restore vertex elements. */
   /* Note that saved_ve will be overwritten in bind_vertex_elements_state. */
   mgr->pipe->bind_vertex_elements_state(mgr->pipe, mgr->saved_ve);
   mgr->pipe->delete_vertex_elements_state(mgr->pipe, mgr->fallback_ve);
   mgr->fallback_ve = NULL;

   /* Delete the now-unused VBO. */
   pipe_resource_reference(&mgr->b.real_vertex_buffer[mgr->translate_vb_slot],
                           NULL);
   mgr->b.nr_real_vertex_buffers = mgr->b.nr_vertex_buffers;
}
#define FORMAT_REPLACE(what, withwhat) \
    case PIPE_FORMAT_##what: format = PIPE_FORMAT_##withwhat; break
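
/* Create a vertex elements state. For each element, pick a hardware-supported
 * native format to fall back to if src_format cannot be fetched directly,
 * and note whether any element will need the translate fallback. */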
struct u_vbuf_mgr_elements *
u_vbuf_mgr_create_vertex_elements(struct u_vbuf_mgr *mgrb,
                                  unsigned count,
                                  const struct pipe_vertex_element *attribs,
                                  struct pipe_vertex_element *native_attribs)
{
   struct u_vbuf_mgr_priv *mgr = (struct u_vbuf_mgr_priv*)mgrb;
   unsigned i;
   struct u_vbuf_mgr_elements *ve = CALLOC_STRUCT(u_vbuf_mgr_elements);

   ve->count = count;

   memcpy(ve->ve, attribs, sizeof(struct pipe_vertex_element) * count);
   memcpy(native_attribs, attribs, sizeof(struct pipe_vertex_element) * count);
   /* Set the best native format in case the original format is not
    * supported. */
   for (i = 0; i < count; i++) {
      enum pipe_format format = ve->ve[i].src_format;

      ve->src_format_size[i] = util_format_get_blocksize(format);

      /* Choose a native format.
       * For now we don't care about the alignment, that's going to
       * be sorted out later. */
      if (!mgr->caps.format_fixed32) {
         switch (format) {
         FORMAT_REPLACE(R32_FIXED,           R32_FLOAT);
         FORMAT_REPLACE(R32G32_FIXED,        R32G32_FLOAT);
         FORMAT_REPLACE(R32G32B32_FIXED,     R32G32B32_FLOAT);
         FORMAT_REPLACE(R32G32B32A32_FIXED,  R32G32B32A32_FLOAT);
         default:;
         }
      }
      if (!mgr->caps.format_float16) {
         switch (format) {
         FORMAT_REPLACE(R16_FLOAT,           R32_FLOAT);
         FORMAT_REPLACE(R16G16_FLOAT,        R32G32_FLOAT);
         FORMAT_REPLACE(R16G16B16_FLOAT,     R32G32B32_FLOAT);
         FORMAT_REPLACE(R16G16B16A16_FLOAT,  R32G32B32A32_FLOAT);
         default:;
         }
      }
      if (!mgr->caps.format_float64) {
         switch (format) {
         FORMAT_REPLACE(R64_FLOAT,           R32_FLOAT);
         FORMAT_REPLACE(R64G64_FLOAT,        R32G32_FLOAT);
         FORMAT_REPLACE(R64G64B64_FLOAT,     R32G32B32_FLOAT);
         FORMAT_REPLACE(R64G64B64A64_FLOAT,  R32G32B32A32_FLOAT);
         default:;
         }
      }
      if (!mgr->caps.format_norm32) {
         switch (format) {
         FORMAT_REPLACE(R32_UNORM,           R32_FLOAT);
         FORMAT_REPLACE(R32G32_UNORM,        R32G32_FLOAT);
         FORMAT_REPLACE(R32G32B32_UNORM,     R32G32B32_FLOAT);
         FORMAT_REPLACE(R32G32B32A32_UNORM,  R32G32B32A32_FLOAT);
         FORMAT_REPLACE(R32_SNORM,           R32_FLOAT);
         FORMAT_REPLACE(R32G32_SNORM,        R32G32_FLOAT);
         FORMAT_REPLACE(R32G32B32_SNORM,     R32G32B32_FLOAT);
         FORMAT_REPLACE(R32G32B32A32_SNORM,  R32G32B32A32_FLOAT);
         default:;
         }
      }
      if (!mgr->caps.format_scaled32) {
         switch (format) {
         FORMAT_REPLACE(R32_USCALED,         R32_FLOAT);
         FORMAT_REPLACE(R32G32_USCALED,      R32G32_FLOAT);
         FORMAT_REPLACE(R32G32B32_USCALED,   R32G32B32_FLOAT);
         FORMAT_REPLACE(R32G32B32A32_USCALED,R32G32B32A32_FLOAT);
         FORMAT_REPLACE(R32_SSCALED,         R32_FLOAT);
         FORMAT_REPLACE(R32G32_SSCALED,      R32G32_FLOAT);
         FORMAT_REPLACE(R32G32B32_SSCALED,   R32G32B32_FLOAT);
         FORMAT_REPLACE(R32G32B32A32_SSCALED,R32G32B32A32_FLOAT);
         default:;
         }
      }
      native_attribs[i].src_format = format;
      ve->native_format[i] = format;
      ve->native_format_size[i] =
         util_format_get_blocksize(ve->native_format[i]);

      ve->incompatible_layout =
         ve->incompatible_layout ||
         ve->ve[i].src_format != ve->native_format[i] ||
         (!mgr->caps.fetch_dword_unaligned && ve->ve[i].src_offset % 4 != 0);
   }

   /* Align the formats to the size of DWORD if needed. */
   if (!mgr->caps.fetch_dword_unaligned) {
      for (i = 0; i < count; i++) {
         ve->native_format_size[i] = align(ve->native_format_size[i], 4);
      }
   }

   return ve;
}
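
/* Remember the currently bound vertex elements state so it can be restored
 * after the translate fallback. The ve_binding_lock flag is set while the
 * manager itself binds the fallback state, so that binding is not saved. */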
void u_vbuf_mgr_bind_vertex_elements(struct u_vbuf_mgr *mgrb,
                                     void *cso,
                                     struct u_vbuf_mgr_elements *ve)
{
   struct u_vbuf_mgr_priv *mgr = (struct u_vbuf_mgr_priv*)mgrb;

   if (!mgr->ve_binding_lock) {
      mgr->saved_ve = cso;
      mgr->ve = ve;
   }
}
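
/* Free a vertex elements state created by u_vbuf_mgr_create_vertex_elements. */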
void u_vbuf_mgr_destroy_vertex_elements(struct u_vbuf_mgr *mgr,
                                        struct u_vbuf_mgr_elements *ve)
{
   FREE(ve);
}
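
/* Bind vertex buffers. Buffers the hardware can fetch from directly are
 * mirrored into real_vertex_buffer right away; user buffers and buffers
 * with an incompatible layout are only flagged here and fixed up later in
 * u_vbuf_mgr_draw_begin. */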
void u_vbuf_mgr_set_vertex_buffers(struct u_vbuf_mgr *mgrb,
                                   unsigned count,
                                   const struct pipe_vertex_buffer *bufs)
{
   struct u_vbuf_mgr_priv *mgr = (struct u_vbuf_mgr_priv*)mgrb;
   unsigned i;

   mgr->any_user_vbs = FALSE;
   mgr->incompatible_vb_layout = FALSE;

   if (!mgr->caps.fetch_dword_unaligned) {
      /* Check if the strides and offsets are aligned to the size of DWORD. */
      for (i = 0; i < count; i++) {
         if (bufs[i].buffer) {
            if (bufs[i].stride % 4 != 0 ||
                bufs[i].buffer_offset % 4 != 0) {
               mgr->incompatible_vb_layout = TRUE;
               break;
            }
         }
      }
   }
   for (i = 0; i < count; i++) {
      const struct pipe_vertex_buffer *vb = &bufs[i];

      pipe_resource_reference(&mgr->b.vertex_buffer[i].buffer, vb->buffer);
      pipe_resource_reference(&mgr->b.real_vertex_buffer[i], NULL);

      if (!vb->buffer) {
         continue;
      }

      if (u_vbuf_resource(vb->buffer)->user_ptr) {
         mgr->any_user_vbs = TRUE;
         continue;
      }

      pipe_resource_reference(&mgr->b.real_vertex_buffer[i], vb->buffer);
   }

   for (; i < mgr->b.nr_real_vertex_buffers; i++) {
      pipe_resource_reference(&mgr->b.vertex_buffer[i].buffer, NULL);
      pipe_resource_reference(&mgr->b.real_vertex_buffer[i], NULL);
   }

   memcpy(mgr->b.vertex_buffer, bufs,
          sizeof(struct pipe_vertex_buffer) * count);

   mgr->b.nr_vertex_buffers = count;
   mgr->b.nr_real_vertex_buffers = count;
}
static enum u_vbuf_return_flags
u_vbuf_upload_buffers(struct u_vbuf_mgr_priv *mgr,
                      int min_index, int max_index,
                      unsigned instance_count)
{
   unsigned i, nr = mgr->ve->count;
   unsigned count = max_index + 1 - min_index;
   boolean uploaded[PIPE_MAX_ATTRIBS] = {0};
   enum u_vbuf_return_flags retval = 0;

   for (i = 0; i < nr; i++) {
      unsigned index = mgr->ve->ve[i].vertex_buffer_index;
      struct pipe_vertex_buffer *vb = &mgr->b.vertex_buffer[index];

      if (vb->buffer &&
          u_vbuf_resource(vb->buffer)->user_ptr &&
          !uploaded[index]) {
         unsigned first, size;
         boolean flushed;
         unsigned instance_div = mgr->ve->ve[i].instance_divisor;

         if (instance_div) {
            first = 0;
            size = vb->stride *
                   ((instance_count + instance_div - 1) / instance_div);
         } else if (vb->stride) {
            first = vb->stride * min_index;
            size = vb->stride * count;

            /* Unusual case when stride is smaller than the format size.
             * XXX This won't work with interleaved arrays. */
            if (mgr->ve->native_format_size[i] > vb->stride)
               size += mgr->ve->native_format_size[i] - vb->stride;
         } else {
            first = 0;
            size = mgr->ve->native_format_size[i];
         }

         u_upload_data(mgr->b.uploader, first, size,
                       u_vbuf_resource(vb->buffer)->user_ptr + first,
                       &vb->buffer_offset,
                       &mgr->b.real_vertex_buffer[index],
                       &flushed);

         vb->buffer_offset -= first;

         uploaded[index] = TRUE;
         if (flushed)
            retval |= U_VBUF_UPLOAD_FLUSHED;
      } else {
         assert(mgr->b.real_vertex_buffer[index]);
      }
   }

   return retval;
}
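
/* Compute the maximum index that can be fetched safely from the bound
 * hardware vertex buffers, i.e. without reading past the end of any of
 * them. The result is stored in mgr->b.max_index. */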
static void u_vbuf_mgr_compute_max_index(struct u_vbuf_mgr_priv *mgr)
{
   unsigned i, nr = mgr->ve->count;

   mgr->b.max_index = ~0;

   for (i = 0; i < nr; i++) {
      struct pipe_vertex_buffer *vb =
            &mgr->b.vertex_buffer[mgr->ve->ve[i].vertex_buffer_index];
      int unused;
      unsigned max_index;

      if (!vb->buffer ||
          !vb->stride ||
          u_vbuf_resource(vb->buffer)->user_ptr) {
         continue;
      }

      /* How many bytes is unused after the last vertex.
       * width0 may be "count*stride - unused" and we have to compensate
       * for that when dividing by stride. */
      unused = vb->stride -
               (mgr->ve->ve[i].src_offset + mgr->ve->src_format_size[i]);

      /* If src_offset is greater than stride (which means it's a buffer
       * offset rather than a vertex offset)... */
      if (unused < 0) {
         unused = 0;
      }

      /* Compute the maximum index for this vertex element. */
      max_index =
         (vb->buffer->width0 - vb->buffer_offset + (unsigned)unused) /
         vb->stride - 1;

      mgr->b.max_index = MIN2(mgr->b.max_index, max_index);
   }
}
enum u_vbuf_return_flags
u_vbuf_mgr_draw_begin(struct u_vbuf_mgr *mgrb,
                      const struct pipe_draw_info *info)
{
   struct u_vbuf_mgr_priv *mgr = (struct u_vbuf_mgr_priv*)mgrb;
   int min_index, max_index;
   enum u_vbuf_return_flags retval = 0;

   u_vbuf_mgr_compute_max_index(mgr);

   min_index = info->min_index - info->index_bias;
   if (info->max_index == ~0) {
      max_index = mgr->b.max_index;
   } else {
      max_index = MIN2(info->max_index - info->index_bias, mgr->b.max_index);
   }

   /* Translate vertices with non-native layouts or formats. */
   if (mgr->incompatible_vb_layout || mgr->ve->incompatible_layout) {
      retval |= u_vbuf_translate_begin(mgr, min_index, max_index);

      if (mgr->fallback_ve) {
         retval |= U_VBUF_BUFFERS_UPDATED;
      }
   }

   /* Upload user buffers. */
   if (mgr->any_user_vbs) {
      retval |= u_vbuf_upload_buffers(mgr, min_index, max_index,
                                      info->instance_count);
      retval |= U_VBUF_BUFFERS_UPDATED;
   }

   return retval;
}
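
/* Finish a draw call: tear down the translate fallback if it was used. */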
void u_vbuf_mgr_draw_end(struct u_vbuf_mgr *mgrb)
{
   struct u_vbuf_mgr_priv *mgr = (struct u_vbuf_mgr_priv*)mgrb;

   if (mgr->fallback_ve) {
      u_vbuf_translate_end(mgr);
   }
}