/**************************************************************************
 *
 * Copyright 2011 Marek Olšák <maraeo@gmail.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL THE AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/* This module uploads user buffers and translates the vertex buffers which
 * contain incompatible vertices (i.e. not supported by the driver/hardware)
 * into compatible ones, based on the Gallium CAPs.
 *
 * It does not upload index buffers.
 *
 * The module heavily uses bitmasks to represent per-buffer and
 * per-vertex-element flags to avoid looping over the list of buffers just
 * to see if there's a non-zero stride, or user buffer, or unsupported format,
 * etc.
 *
 * There are 3 categories of vertex elements, which are processed separately:
 * - per-vertex attribs (stride != 0, instance_divisor == 0)
 * - instanced attribs (stride != 0, instance_divisor > 0)
 * - constant attribs (stride == 0)
 *
 * All needed uploads and translations are performed every draw command, but
 * only the subset of vertices needed for that draw command is uploaded or
 * translated. (the module never translates whole buffers)
 *
 *
 * The module consists of two main parts:
 *
 *
 * 1) Translate (u_vbuf_translate_begin/end)
 *
 * This is pretty much a vertex fetch fallback. It translates vertices from
 * one vertex buffer to another in an unused vertex buffer slot. It does
 * whatever is needed to make the vertices readable by the hardware (changes
 * vertex formats and aligns offsets and strides). The translate module is
 * used here.
 *
 * Each of the 3 categories is translated to a separate buffer.
 * Only the [min_index, max_index] range is translated. For instanced attribs,
 * the range is [start_instance, start_instance+instance_count]. For constant
 * attribs, the range is [0, 1].
 *
 *
 * 2) User buffer uploading (u_vbuf_upload_buffers)
 *
 * Only the [min_index, max_index] range is uploaded (just like Translate)
 * with a single memcpy.
 *
 * This method works best for non-indexed draw operations or indexed draw
 * operations where the [min_index, max_index] range is not much bigger
 * than the vertex count.
 *
 * If the range is too big (e.g. one triangle with indices {0, 1, 10000}),
 * the per-vertex attribs are uploaded via the translate module, all packed
 * into one vertex buffer, and the indexed draw call is turned into
 * a non-indexed one in the process. This adds additional complexity
 * to the translate part, but it prevents bad apps from bringing your frame
 * rate down.
 *
 *
 * If there is nothing to do, it forwards every command to the driver.
 * The module also has its own CSO cache of vertex element states.
 */
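
/* Rough usage sketch (illustrative only; the exact integration is
 * driver-specific): a driver creates the manager with u_vbuf_create(),
 * routes its vertex buffer / vertex element / index buffer state through
 * u_vbuf_set_vertex_buffers(), u_vbuf_set_vertex_elements() and
 * u_vbuf_set_index_buffer(), and calls u_vbuf_draw_vbo() instead of
 * pipe->draw_vbo() so that user-buffer uploads and format fallbacks happen
 * transparently. */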
#include "util/u_vbuf.h"

#include "util/u_dump.h"
#include "util/u_format.h"
#include "util/u_inlines.h"
#include "util/u_memory.h"
#include "util/u_upload_mgr.h"
#include "translate/translate.h"
#include "translate/translate_cache.h"
#include "cso_cache/cso_cache.h"
#include "cso_cache/cso_hash.h"
struct u_vbuf_elements {
   unsigned count;
   struct pipe_vertex_element ve[PIPE_MAX_ATTRIBS];

   unsigned src_format_size[PIPE_MAX_ATTRIBS];

   /* If (velem[i].src_format != native_format[i]), the vertex buffer
    * referenced by the vertex element cannot be used for rendering and
    * its vertex data must be translated to native_format[i]. */
   enum pipe_format native_format[PIPE_MAX_ATTRIBS];
   unsigned native_format_size[PIPE_MAX_ATTRIBS];

   /* Which buffers are used by the vertex element state. */
   uint32_t used_vb_mask;
   /* This might mean two things:
    * - src_format != native_format, as discussed above.
    * - src_offset % 4 != 0 (if the caps don't allow such an offset). */
   uint32_t incompatible_elem_mask; /* each bit describes a corresp. attrib */
   /* Which buffer has at least one vertex element referencing it
    * incompatible. */
   uint32_t incompatible_vb_mask_any;
   /* Which buffer has all vertex elements referencing it incompatible. */
   uint32_t incompatible_vb_mask_all;
   /* Which buffer has at least one vertex element referencing it
    * compatible. */
   uint32_t compatible_vb_mask_any;
   /* Which buffer has all vertex elements referencing it compatible. */
   uint32_t compatible_vb_mask_all;

   /* Which buffer has at least one vertex element referencing it
    * non-instanced. */
   uint32_t noninstance_vb_mask_any;

   void *driver_cso;
};
enum {
   VB_VERTEX = 0,
   VB_INSTANCE = 1,
   VB_CONST = 2,
   VB_NUM = 3
};

struct u_vbuf {
   struct u_vbuf_caps caps;

   struct pipe_context *pipe;
   struct translate_cache *translate_cache;
   struct cso_cache *cso_cache;

   /* This is what was set in set_vertex_buffers.
    * May contain user buffers. */
   struct pipe_vertex_buffer vertex_buffer[PIPE_MAX_ATTRIBS];
   uint32_t enabled_vb_mask;

   /* Saved vertex buffer. */
   unsigned aux_vertex_buffer_slot;
   struct pipe_vertex_buffer aux_vertex_buffer_saved;

   /* Vertex buffers for the driver.
    * There are usually no user buffers. */
   struct pipe_vertex_buffer real_vertex_buffer[PIPE_MAX_ATTRIBS];
   uint32_t dirty_real_vb_mask; /* which buffers are dirty since the last
                                   call of set_vertex_buffers */

   /* The index buffer. */
   struct pipe_index_buffer index_buffer;

   /* Vertex elements. */
   struct u_vbuf_elements *ve, *ve_saved;

   /* Vertex elements used for the translate fallback. */
   struct pipe_vertex_element fallback_velems[PIPE_MAX_ATTRIBS];
   /* If true, the currently-bound vertex element state is the one created
    * for the translate fallback and therefore used for rendering too. */
   boolean using_translate;
   /* The vertex buffer slot index where translated vertices have been
    * stored in. */
   unsigned fallback_vbs[VB_NUM];

   /* Which buffer is a user buffer. */
   uint32_t user_vb_mask; /* each bit describes a corresp. buffer */
   /* Which buffer is incompatible (unaligned). */
   uint32_t incompatible_vb_mask; /* each bit describes a corresp. buffer */
   /* Which buffer has a non-zero stride. */
   uint32_t nonzero_stride_vb_mask; /* each bit describes a corresp. buffer */
};
static void *
u_vbuf_create_vertex_elements(struct u_vbuf *mgr, unsigned count,
                              const struct pipe_vertex_element *attribs);
static void u_vbuf_delete_vertex_elements(struct u_vbuf *mgr, void *cso);
static const struct {
   enum pipe_format from, to;
} vbuf_format_fallbacks[] = {
   { PIPE_FORMAT_R32_FIXED,            PIPE_FORMAT_R32_FLOAT },
   { PIPE_FORMAT_R32G32_FIXED,         PIPE_FORMAT_R32G32_FLOAT },
   { PIPE_FORMAT_R32G32B32_FIXED,      PIPE_FORMAT_R32G32B32_FLOAT },
   { PIPE_FORMAT_R32G32B32A32_FIXED,   PIPE_FORMAT_R32G32B32A32_FLOAT },
   { PIPE_FORMAT_R16_FLOAT,            PIPE_FORMAT_R32_FLOAT },
   { PIPE_FORMAT_R16G16_FLOAT,         PIPE_FORMAT_R32G32_FLOAT },
   { PIPE_FORMAT_R16G16B16_FLOAT,      PIPE_FORMAT_R32G32B32_FLOAT },
   { PIPE_FORMAT_R16G16B16A16_FLOAT,   PIPE_FORMAT_R32G32B32A32_FLOAT },
   { PIPE_FORMAT_R64_FLOAT,            PIPE_FORMAT_R32_FLOAT },
   { PIPE_FORMAT_R64G64_FLOAT,         PIPE_FORMAT_R32G32_FLOAT },
   { PIPE_FORMAT_R64G64B64_FLOAT,      PIPE_FORMAT_R32G32B32_FLOAT },
   { PIPE_FORMAT_R64G64B64A64_FLOAT,   PIPE_FORMAT_R32G32B32A32_FLOAT },
   { PIPE_FORMAT_R32_UNORM,            PIPE_FORMAT_R32_FLOAT },
   { PIPE_FORMAT_R32G32_UNORM,         PIPE_FORMAT_R32G32_FLOAT },
   { PIPE_FORMAT_R32G32B32_UNORM,      PIPE_FORMAT_R32G32B32_FLOAT },
   { PIPE_FORMAT_R32G32B32A32_UNORM,   PIPE_FORMAT_R32G32B32A32_FLOAT },
   { PIPE_FORMAT_R32_SNORM,            PIPE_FORMAT_R32_FLOAT },
   { PIPE_FORMAT_R32G32_SNORM,         PIPE_FORMAT_R32G32_FLOAT },
   { PIPE_FORMAT_R32G32B32_SNORM,      PIPE_FORMAT_R32G32B32_FLOAT },
   { PIPE_FORMAT_R32G32B32A32_SNORM,   PIPE_FORMAT_R32G32B32A32_FLOAT },
   { PIPE_FORMAT_R32_USCALED,          PIPE_FORMAT_R32_FLOAT },
   { PIPE_FORMAT_R32G32_USCALED,       PIPE_FORMAT_R32G32_FLOAT },
   { PIPE_FORMAT_R32G32B32_USCALED,    PIPE_FORMAT_R32G32B32_FLOAT },
   { PIPE_FORMAT_R32G32B32A32_USCALED, PIPE_FORMAT_R32G32B32A32_FLOAT },
   { PIPE_FORMAT_R32_SSCALED,          PIPE_FORMAT_R32_FLOAT },
   { PIPE_FORMAT_R32G32_SSCALED,       PIPE_FORMAT_R32G32_FLOAT },
   { PIPE_FORMAT_R32G32B32_SSCALED,    PIPE_FORMAT_R32G32B32_FLOAT },
   { PIPE_FORMAT_R32G32B32A32_SSCALED, PIPE_FORMAT_R32G32B32A32_FLOAT },
   { PIPE_FORMAT_R16_UNORM,            PIPE_FORMAT_R32_FLOAT },
   { PIPE_FORMAT_R16G16_UNORM,         PIPE_FORMAT_R32G32_FLOAT },
   { PIPE_FORMAT_R16G16B16_UNORM,      PIPE_FORMAT_R32G32B32_FLOAT },
   { PIPE_FORMAT_R16G16B16A16_UNORM,   PIPE_FORMAT_R32G32B32A32_FLOAT },
   { PIPE_FORMAT_R16_SNORM,            PIPE_FORMAT_R32_FLOAT },
   { PIPE_FORMAT_R16G16_SNORM,         PIPE_FORMAT_R32G32_FLOAT },
   { PIPE_FORMAT_R16G16B16_SNORM,      PIPE_FORMAT_R32G32B32_FLOAT },
   { PIPE_FORMAT_R16G16B16A16_SNORM,   PIPE_FORMAT_R32G32B32A32_FLOAT },
   { PIPE_FORMAT_R16_USCALED,          PIPE_FORMAT_R32_FLOAT },
   { PIPE_FORMAT_R16G16_USCALED,       PIPE_FORMAT_R32G32_FLOAT },
   { PIPE_FORMAT_R16G16B16_USCALED,    PIPE_FORMAT_R32G32B32_FLOAT },
   { PIPE_FORMAT_R16G16B16A16_USCALED, PIPE_FORMAT_R32G32B32A32_FLOAT },
   { PIPE_FORMAT_R16_SSCALED,          PIPE_FORMAT_R32_FLOAT },
   { PIPE_FORMAT_R16G16_SSCALED,       PIPE_FORMAT_R32G32_FLOAT },
   { PIPE_FORMAT_R16G16B16_SSCALED,    PIPE_FORMAT_R32G32B32_FLOAT },
   { PIPE_FORMAT_R16G16B16A16_SSCALED, PIPE_FORMAT_R32G32B32A32_FLOAT },
   { PIPE_FORMAT_R8_UNORM,             PIPE_FORMAT_R32_FLOAT },
   { PIPE_FORMAT_R8G8_UNORM,           PIPE_FORMAT_R32G32_FLOAT },
   { PIPE_FORMAT_R8G8B8_UNORM,         PIPE_FORMAT_R32G32B32_FLOAT },
   { PIPE_FORMAT_R8G8B8A8_UNORM,       PIPE_FORMAT_R32G32B32A32_FLOAT },
   { PIPE_FORMAT_R8_SNORM,             PIPE_FORMAT_R32_FLOAT },
   { PIPE_FORMAT_R8G8_SNORM,           PIPE_FORMAT_R32G32_FLOAT },
   { PIPE_FORMAT_R8G8B8_SNORM,         PIPE_FORMAT_R32G32B32_FLOAT },
   { PIPE_FORMAT_R8G8B8A8_SNORM,       PIPE_FORMAT_R32G32B32A32_FLOAT },
   { PIPE_FORMAT_R8_USCALED,           PIPE_FORMAT_R32_FLOAT },
   { PIPE_FORMAT_R8G8_USCALED,         PIPE_FORMAT_R32G32_FLOAT },
   { PIPE_FORMAT_R8G8B8_USCALED,       PIPE_FORMAT_R32G32B32_FLOAT },
   { PIPE_FORMAT_R8G8B8A8_USCALED,     PIPE_FORMAT_R32G32B32A32_FLOAT },
   { PIPE_FORMAT_R8_SSCALED,           PIPE_FORMAT_R32_FLOAT },
   { PIPE_FORMAT_R8G8_SSCALED,         PIPE_FORMAT_R32G32_FLOAT },
   { PIPE_FORMAT_R8G8B8_SSCALED,       PIPE_FORMAT_R32G32B32_FLOAT },
   { PIPE_FORMAT_R8G8B8A8_SSCALED,     PIPE_FORMAT_R32G32B32A32_FLOAT },
};
boolean u_vbuf_get_caps(struct pipe_screen *screen, struct u_vbuf_caps *caps,
                        unsigned flags)
{
   unsigned i;
   boolean fallback = FALSE;

   /* I'd rather have a bitfield of which formats are supported and a static
    * table of the translations indexed by format, but since we don't have C99
    * we can't easily make a sparsely-populated table indexed by format. So,
    * we construct the sparse table here.
    */
   for (i = 0; i < PIPE_FORMAT_COUNT; i++)
      caps->format_translation[i] = i;

   for (i = 0; i < ARRAY_SIZE(vbuf_format_fallbacks); i++) {
      enum pipe_format format = vbuf_format_fallbacks[i].from;

      if (!screen->is_format_supported(screen, format, PIPE_BUFFER, 0,
                                       PIPE_BIND_VERTEX_BUFFER)) {
         caps->format_translation[format] = vbuf_format_fallbacks[i].to;
         fallback = TRUE;
      }
   }

   caps->buffer_offset_unaligned =
      !screen->get_param(screen,
                         PIPE_CAP_VERTEX_BUFFER_OFFSET_4BYTE_ALIGNED_ONLY);
   caps->buffer_stride_unaligned =
      !screen->get_param(screen,
                         PIPE_CAP_VERTEX_BUFFER_STRIDE_4BYTE_ALIGNED_ONLY);
   caps->velem_src_offset_unaligned =
      !screen->get_param(screen,
                         PIPE_CAP_VERTEX_ELEMENT_SRC_OFFSET_4BYTE_ALIGNED_ONLY);
   caps->user_vertex_buffers =
      screen->get_param(screen, PIPE_CAP_USER_VERTEX_BUFFERS);

   if (!caps->buffer_offset_unaligned ||
       !caps->buffer_stride_unaligned ||
       !caps->velem_src_offset_unaligned ||
       (!(flags & U_VBUF_FLAG_NO_USER_VBOS) && !caps->user_vertex_buffers)) {
      fallback = TRUE;
   }

   return fallback;
}
struct u_vbuf *
u_vbuf_create(struct pipe_context *pipe,
              struct u_vbuf_caps *caps, unsigned aux_vertex_buffer_index)
{
   struct u_vbuf *mgr = CALLOC_STRUCT(u_vbuf);

   mgr->caps = *caps;
   mgr->aux_vertex_buffer_slot = aux_vertex_buffer_index;
   mgr->pipe = pipe;
   mgr->cso_cache = cso_cache_create();
   mgr->translate_cache = translate_cache_create();
   memset(mgr->fallback_vbs, ~0, sizeof(mgr->fallback_vbs));

   return mgr;
}
/* u_vbuf uses its own caching for vertex elements, because it needs to keep
 * its own preprocessed state per vertex element CSO. */
static struct u_vbuf_elements *
u_vbuf_set_vertex_elements_internal(struct u_vbuf *mgr, unsigned count,
                                    const struct pipe_vertex_element *states)
{
   struct pipe_context *pipe = mgr->pipe;
   unsigned key_size, hash_key;
   struct cso_hash_iter iter;
   struct u_vbuf_elements *ve;
   struct cso_velems_state velems_state;

   /* need to include the count into the stored state data too. */
   key_size = sizeof(struct pipe_vertex_element) * count + sizeof(unsigned);
   velems_state.count = count;
   memcpy(velems_state.velems, states,
          sizeof(struct pipe_vertex_element) * count);
   hash_key = cso_construct_key((void*)&velems_state, key_size);
   iter = cso_find_state_template(mgr->cso_cache, hash_key, CSO_VELEMENTS,
                                  (void*)&velems_state, key_size);

   if (cso_hash_iter_is_null(iter)) {
      struct cso_velements *cso = MALLOC_STRUCT(cso_velements);
      memcpy(&cso->state, &velems_state, key_size);
      cso->data = u_vbuf_create_vertex_elements(mgr, count, states);
      cso->delete_state = (cso_state_callback)u_vbuf_delete_vertex_elements;
      cso->context = (void*)mgr;

      iter = cso_insert_state(mgr->cso_cache, hash_key, CSO_VELEMENTS, cso);
   }

   ve = ((struct cso_velements *)cso_hash_iter_data(iter))->data;

   pipe->bind_vertex_elements_state(pipe, ve->driver_cso);

   return ve;
}
void u_vbuf_set_vertex_elements(struct u_vbuf *mgr, unsigned count,
                                const struct pipe_vertex_element *states)
{
   mgr->ve = u_vbuf_set_vertex_elements_internal(mgr, count, states);
}
void u_vbuf_destroy(struct u_vbuf *mgr)
{
   struct pipe_screen *screen = mgr->pipe->screen;
   unsigned i;
   unsigned num_vb = screen->get_shader_param(screen, PIPE_SHADER_VERTEX,
                                              PIPE_SHADER_CAP_MAX_INPUTS);

   mgr->pipe->set_index_buffer(mgr->pipe, NULL);
   pipe_resource_reference(&mgr->index_buffer.buffer, NULL);

   mgr->pipe->set_vertex_buffers(mgr->pipe, 0, num_vb, NULL);

   for (i = 0; i < PIPE_MAX_ATTRIBS; i++)
      pipe_vertex_buffer_unreference(&mgr->vertex_buffer[i]);
   for (i = 0; i < PIPE_MAX_ATTRIBS; i++)
      pipe_vertex_buffer_unreference(&mgr->real_vertex_buffer[i]);

   pipe_vertex_buffer_unreference(&mgr->aux_vertex_buffer_saved);

   translate_cache_destroy(mgr->translate_cache);
   cso_cache_delete(mgr->cso_cache);
   FREE(mgr);
}
static enum pipe_error
u_vbuf_translate_buffers(struct u_vbuf *mgr, struct translate_key *key,
                         unsigned vb_mask, unsigned out_vb,
                         int start_vertex, unsigned num_vertices,
                         int start_index, unsigned num_indices, int min_index,
                         boolean unroll_indices)
{
   struct translate *tr;
   struct pipe_transfer *vb_transfer[PIPE_MAX_ATTRIBS] = {0};
   struct pipe_resource *out_buffer = NULL;
   uint8_t *out_map;
   unsigned out_offset, mask;

   /* Get a translate object. */
   tr = translate_cache_find(mgr->translate_cache, key);

   /* Map buffers we want to translate. */
   mask = vb_mask;
   while (mask) {
      struct pipe_vertex_buffer *vb;
      unsigned offset;
      uint8_t *map;
      unsigned i = u_bit_scan(&mask);

      vb = &mgr->vertex_buffer[i];
      offset = vb->buffer_offset + vb->stride * start_vertex;

      if (vb->is_user_buffer) {
         map = (uint8_t*)vb->buffer.user + offset;
      } else {
         unsigned size = vb->stride ? num_vertices * vb->stride
                                    : sizeof(double)*4;

         if (offset + size > vb->buffer.resource->width0) {
            size = vb->buffer.resource->width0 - offset;
         }

         map = pipe_buffer_map_range(mgr->pipe, vb->buffer.resource, offset, size,
                                     PIPE_TRANSFER_READ, &vb_transfer[i]);
      }

      /* Subtract min_index so that indexing with the index buffer works. */
      if (unroll_indices) {
         map -= (ptrdiff_t)vb->stride * min_index;
      }

      tr->set_buffer(tr, i, map, vb->stride, ~0);
   }

   /* Translate. */
   if (unroll_indices) {
      struct pipe_index_buffer *ib = &mgr->index_buffer;
      struct pipe_transfer *transfer = NULL;
      unsigned offset = ib->offset + start_index * ib->index_size;
      uint8_t *map;

      assert((ib->buffer || ib->user_buffer) && ib->index_size);

      /* Create and map the output buffer. */
      u_upload_alloc(mgr->pipe->stream_uploader, 0,
                     key->output_stride * num_indices, 4,
                     &out_offset, &out_buffer,
                     (void**)&out_map);
      if (!out_buffer)
         return PIPE_ERROR_OUT_OF_MEMORY;

      if (ib->user_buffer) {
         map = (uint8_t*)ib->user_buffer + offset;
      } else {
         map = pipe_buffer_map_range(mgr->pipe, ib->buffer, offset,
                                     num_indices * ib->index_size,
                                     PIPE_TRANSFER_READ, &transfer);
      }

      switch (ib->index_size) {
      case 4:
         tr->run_elts(tr, (unsigned*)map, num_indices, 0, 0, out_map);
         break;
      case 2:
         tr->run_elts16(tr, (uint16_t*)map, num_indices, 0, 0, out_map);
         break;
      case 1:
         tr->run_elts8(tr, map, num_indices, 0, 0, out_map);
         break;
      }

      if (transfer) {
         pipe_buffer_unmap(mgr->pipe, transfer);
      }
   } else {
      /* Create and map the output buffer. */
      u_upload_alloc(mgr->pipe->stream_uploader,
                     key->output_stride * start_vertex,
                     key->output_stride * num_vertices, 4,
                     &out_offset, &out_buffer,
                     (void**)&out_map);
      if (!out_buffer)
         return PIPE_ERROR_OUT_OF_MEMORY;

      out_offset -= key->output_stride * start_vertex;

      tr->run(tr, 0, num_vertices, 0, 0, out_map);
   }

   /* Unmap all buffers. */
   mask = vb_mask;
   while (mask) {
      unsigned i = u_bit_scan(&mask);

      if (vb_transfer[i]) {
         pipe_buffer_unmap(mgr->pipe, vb_transfer[i]);
      }
   }

   /* Setup the new vertex buffer. */
   mgr->real_vertex_buffer[out_vb].buffer_offset = out_offset;
   mgr->real_vertex_buffer[out_vb].stride = key->output_stride;

   /* Move the buffer reference. */
   pipe_resource_reference(
      &mgr->real_vertex_buffer[out_vb].buffer.resource, NULL);
   mgr->real_vertex_buffer[out_vb].buffer.resource = out_buffer;

   return PIPE_OK;
}
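
/* Pick an unused vertex buffer slot for each category (vertex / instance /
 * constant) that needs the translate fallback. Returns FALSE if there are
 * not enough free slots. */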
static boolean
u_vbuf_translate_find_free_vb_slots(struct u_vbuf *mgr,
                                    unsigned mask[VB_NUM])
{
   unsigned type;
   unsigned fallback_vbs[VB_NUM];
   /* Set the bit for each buffer which is incompatible, or isn't set. */
   uint32_t unused_vb_mask =
      mgr->ve->incompatible_vb_mask_all | mgr->incompatible_vb_mask |
      ~mgr->enabled_vb_mask;

   memset(fallback_vbs, ~0, sizeof(fallback_vbs));

   /* Find free slots for each type if needed. */
   for (type = 0; type < VB_NUM; type++) {
      if (mask[type]) {
         uint32_t index;

         if (!unused_vb_mask) {
            return FALSE;
         }

         index = ffs(unused_vb_mask) - 1;
         fallback_vbs[type] = index;
         unused_vb_mask &= ~(1 << index);
         /*printf("found slot=%i for type=%i\n", index, type);*/
      }
   }

   for (type = 0; type < VB_NUM; type++) {
      if (mask[type]) {
         mgr->dirty_real_vb_mask |= 1 << fallback_vbs[type];
      }
   }

   memcpy(mgr->fallback_vbs, fallback_vbs, sizeof(fallback_vbs));
   return TRUE;
}
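
/* Classify every vertex element into the vertex/instance/constant category,
 * build one translate key per category, convert the affected buffers and
 * bind the fallback vertex element state. Returns FALSE on failure (no free
 * slots or out of memory). */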
static boolean
u_vbuf_translate_begin(struct u_vbuf *mgr,
                       int start_vertex, unsigned num_vertices,
                       int start_instance, unsigned num_instances,
                       int start_index, unsigned num_indices, int min_index,
                       boolean unroll_indices)
{
   unsigned mask[VB_NUM] = {0};
   struct translate_key key[VB_NUM];
   unsigned elem_index[VB_NUM][PIPE_MAX_ATTRIBS]; /* ... into key.elements */
   unsigned i, type;
   unsigned incompatible_vb_mask = mgr->incompatible_vb_mask &
                                   mgr->ve->used_vb_mask;

   int start[VB_NUM] = {
      start_vertex,     /* VERTEX */
      start_instance,   /* INSTANCE */
      0                 /* CONST */
   };

   unsigned num[VB_NUM] = {
      num_vertices,     /* VERTEX */
      num_instances,    /* INSTANCE */
      1                 /* CONST */
   };

   memset(key, 0, sizeof(key));
   memset(elem_index, ~0, sizeof(elem_index));

   /* See if there are vertex attribs of each type to translate and
    * which ones. */
   for (i = 0; i < mgr->ve->count; i++) {
      unsigned vb_index = mgr->ve->ve[i].vertex_buffer_index;

      if (!mgr->vertex_buffer[vb_index].stride) {
         if (!(mgr->ve->incompatible_elem_mask & (1 << i)) &&
             !(incompatible_vb_mask & (1 << vb_index))) {
            continue;
         }
         mask[VB_CONST] |= 1 << vb_index;
      } else if (mgr->ve->ve[i].instance_divisor) {
         if (!(mgr->ve->incompatible_elem_mask & (1 << i)) &&
             !(incompatible_vb_mask & (1 << vb_index))) {
            continue;
         }
         mask[VB_INSTANCE] |= 1 << vb_index;
      } else {
         if (!unroll_indices &&
             !(mgr->ve->incompatible_elem_mask & (1 << i)) &&
             !(incompatible_vb_mask & (1 << vb_index))) {
            continue;
         }
         mask[VB_VERTEX] |= 1 << vb_index;
      }
   }

   assert(mask[VB_VERTEX] || mask[VB_INSTANCE] || mask[VB_CONST]);

   /* Find free vertex buffer slots. */
   if (!u_vbuf_translate_find_free_vb_slots(mgr, mask)) {
      return FALSE;
   }

   /* Initialize the translate keys. */
   for (i = 0; i < mgr->ve->count; i++) {
      struct translate_key *k;
      struct translate_element *te;
      enum pipe_format output_format = mgr->ve->native_format[i];
      unsigned bit, vb_index = mgr->ve->ve[i].vertex_buffer_index;
      bit = 1 << vb_index;

      if (!(mgr->ve->incompatible_elem_mask & (1 << i)) &&
          !(incompatible_vb_mask & (1 << vb_index)) &&
          (!unroll_indices || !(mask[VB_VERTEX] & bit))) {
         continue;
      }

      /* Set type to what we will translate.
       * Whether vertex, instance, or constant attribs. */
      for (type = 0; type < VB_NUM; type++) {
         if (mask[type] & bit) {
            break;
         }
      }
      assert(type < VB_NUM);
      if (mgr->ve->ve[i].src_format != output_format)
         assert(translate_is_output_format_supported(output_format));
      /*printf("velem=%i type=%i\n", i, type);*/

      /* Add the vertex element. */
      k = &key[type];
      elem_index[type][i] = k->nr_elements;

      te = &k->element[k->nr_elements];
      te->type = TRANSLATE_ELEMENT_NORMAL;
      te->instance_divisor = 0;
      te->input_buffer = vb_index;
      te->input_format = mgr->ve->ve[i].src_format;
      te->input_offset = mgr->ve->ve[i].src_offset;
      te->output_format = output_format;
      te->output_offset = k->output_stride;

      k->output_stride += mgr->ve->native_format_size[i];
      k->nr_elements++;
   }

   /* Translate buffers. */
   for (type = 0; type < VB_NUM; type++) {
      if (key[type].nr_elements) {
         enum pipe_error err;
         err = u_vbuf_translate_buffers(mgr, &key[type], mask[type],
                                        mgr->fallback_vbs[type],
                                        start[type], num[type],
                                        start_index, num_indices, min_index,
                                        unroll_indices && type == VB_VERTEX);
         if (err != PIPE_OK)
            return FALSE;

         /* Fixup the stride for constant attribs. */
         if (type == VB_CONST) {
            mgr->real_vertex_buffer[mgr->fallback_vbs[VB_CONST]].stride = 0;
         }
      }
   }

   /* Setup new vertex elements. */
   for (i = 0; i < mgr->ve->count; i++) {
      for (type = 0; type < VB_NUM; type++) {
         if (elem_index[type][i] < key[type].nr_elements) {
            struct translate_element *te = &key[type].element[elem_index[type][i]];
            mgr->fallback_velems[i].instance_divisor = mgr->ve->ve[i].instance_divisor;
            mgr->fallback_velems[i].src_format = te->output_format;
            mgr->fallback_velems[i].src_offset = te->output_offset;
            mgr->fallback_velems[i].vertex_buffer_index = mgr->fallback_vbs[type];

            /* elem_index[type][i] can only be set for one type. */
            assert(type > VB_INSTANCE || elem_index[type+1][i] == ~0u);
            assert(type > VB_VERTEX || elem_index[type+2][i] == ~0u);
            break;
         }
      }
      /* No translating, just copy the original vertex element over. */
      if (type == VB_NUM) {
         memcpy(&mgr->fallback_velems[i], &mgr->ve->ve[i],
                sizeof(struct pipe_vertex_element));
      }
   }

   u_vbuf_set_vertex_elements_internal(mgr, mgr->ve->count,
                                       mgr->fallback_velems);
   mgr->using_translate = TRUE;
   return TRUE;
}
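
/* Undo the translate fallback: rebind the original vertex elements and drop
 * the temporary vertex buffers that held the translated data. */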
static void u_vbuf_translate_end(struct u_vbuf *mgr)
{
   unsigned i;

   /* Restore vertex elements. */
   mgr->pipe->bind_vertex_elements_state(mgr->pipe, mgr->ve->driver_cso);
   mgr->using_translate = FALSE;

   /* Unreference the now-unused VBOs. */
   for (i = 0; i < VB_NUM; i++) {
      unsigned vb = mgr->fallback_vbs[i];
      if (vb != ~0u) {
         pipe_resource_reference(&mgr->real_vertex_buffer[vb].buffer.resource, NULL);
         mgr->fallback_vbs[i] = ~0;

         /* This will cause the buffer to be unbound in the driver later. */
         mgr->dirty_real_vb_mask |= 1 << vb;
      }
   }
}
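
/* Preprocess a vertex element CSO: record per-element source format sizes,
 * pick the closest natively supported format for each attrib, and precompute
 * the per-buffer compatibility bitmasks consulted at draw time. */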
static void *
u_vbuf_create_vertex_elements(struct u_vbuf *mgr, unsigned count,
                              const struct pipe_vertex_element *attribs)
{
   struct pipe_context *pipe = mgr->pipe;
   unsigned i;
   struct pipe_vertex_element driver_attribs[PIPE_MAX_ATTRIBS];
   struct u_vbuf_elements *ve = CALLOC_STRUCT(u_vbuf_elements);
   uint32_t used_buffers = 0;

   ve->count = count;

   memcpy(ve->ve, attribs, sizeof(struct pipe_vertex_element) * count);
   memcpy(driver_attribs, attribs, sizeof(struct pipe_vertex_element) * count);

   /* Set the best native format in case the original format is not
    * supported. */
   for (i = 0; i < count; i++) {
      enum pipe_format format = ve->ve[i].src_format;

      ve->src_format_size[i] = util_format_get_blocksize(format);

      used_buffers |= 1 << ve->ve[i].vertex_buffer_index;

      if (!ve->ve[i].instance_divisor) {
         ve->noninstance_vb_mask_any |= 1 << ve->ve[i].vertex_buffer_index;
      }

      format = mgr->caps.format_translation[format];

      driver_attribs[i].src_format = format;
      ve->native_format[i] = format;
      ve->native_format_size[i] =
         util_format_get_blocksize(ve->native_format[i]);

      if (ve->ve[i].src_format != format ||
          (!mgr->caps.velem_src_offset_unaligned &&
           ve->ve[i].src_offset % 4 != 0)) {
         ve->incompatible_elem_mask |= 1 << i;
         ve->incompatible_vb_mask_any |= 1 << ve->ve[i].vertex_buffer_index;
      } else {
         ve->compatible_vb_mask_any |= 1 << ve->ve[i].vertex_buffer_index;
      }
   }

   ve->used_vb_mask = used_buffers;
   ve->compatible_vb_mask_all = ~ve->incompatible_vb_mask_any & used_buffers;
   ve->incompatible_vb_mask_all = ~ve->compatible_vb_mask_any & used_buffers;

   /* Align the formats and offsets to the size of DWORD if needed. */
   if (!mgr->caps.velem_src_offset_unaligned) {
      for (i = 0; i < count; i++) {
         ve->native_format_size[i] = align(ve->native_format_size[i], 4);
         driver_attribs[i].src_offset = align(ve->ve[i].src_offset, 4);
      }
   }

   ve->driver_cso =
      pipe->create_vertex_elements_state(pipe, count, driver_attribs);
   return ve;
}
static void u_vbuf_delete_vertex_elements(struct u_vbuf *mgr, void *cso)
{
   struct pipe_context *pipe = mgr->pipe;
   struct u_vbuf_elements *ve = cso;

   pipe->delete_vertex_elements_state(pipe, ve->driver_cso);
   FREE(ve);
}
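
/* Mirror of pipe_context::set_vertex_buffers: keep a copy of the original
 * buffers, classify each one (user memory, unaligned offset/stride, non-zero
 * stride) into the per-buffer bitmasks, and forward only driver-compatible
 * buffers right away; the rest are fixed up at draw time. */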
void u_vbuf_set_vertex_buffers(struct u_vbuf *mgr,
                               unsigned start_slot, unsigned count,
                               const struct pipe_vertex_buffer *bufs)
{
   unsigned i;
   /* which buffers are enabled */
   uint32_t enabled_vb_mask = 0;
   /* which buffers are in user memory */
   uint32_t user_vb_mask = 0;
   /* which buffers are incompatible with the driver */
   uint32_t incompatible_vb_mask = 0;
   /* which buffers have a non-zero stride */
   uint32_t nonzero_stride_vb_mask = 0;
   uint32_t mask = ~(((1ull << count) - 1) << start_slot);

   /* Zero out the bits we are going to rewrite completely. */
   mgr->user_vb_mask &= mask;
   mgr->incompatible_vb_mask &= mask;
   mgr->nonzero_stride_vb_mask &= mask;
   mgr->enabled_vb_mask &= mask;

   if (!bufs) {
      struct pipe_context *pipe = mgr->pipe;
      /* Unbind. */
      mgr->dirty_real_vb_mask &= mask;

      for (i = 0; i < count; i++) {
         unsigned dst_index = start_slot + i;

         pipe_vertex_buffer_unreference(&mgr->vertex_buffer[dst_index]);
         pipe_resource_reference(&mgr->real_vertex_buffer[dst_index].buffer.resource,
                                 NULL);
      }

      pipe->set_vertex_buffers(pipe, start_slot, count, NULL);
      return;
   }

   for (i = 0; i < count; i++) {
      unsigned dst_index = start_slot + i;
      const struct pipe_vertex_buffer *vb = &bufs[i];
      struct pipe_vertex_buffer *orig_vb = &mgr->vertex_buffer[dst_index];
      struct pipe_vertex_buffer *real_vb = &mgr->real_vertex_buffer[dst_index];

      if (!vb->buffer.resource) {
         pipe_vertex_buffer_unreference(orig_vb);
         pipe_vertex_buffer_unreference(real_vb);
         continue;
      }

      pipe_vertex_buffer_reference(orig_vb, vb);

      if (vb->stride) {
         nonzero_stride_vb_mask |= 1 << dst_index;
      }
      enabled_vb_mask |= 1 << dst_index;

      if ((!mgr->caps.buffer_offset_unaligned && vb->buffer_offset % 4 != 0) ||
          (!mgr->caps.buffer_stride_unaligned && vb->stride % 4 != 0)) {
         incompatible_vb_mask |= 1 << dst_index;
         real_vb->buffer_offset = vb->buffer_offset;
         real_vb->stride = vb->stride;
         pipe_vertex_buffer_unreference(real_vb);
         real_vb->is_user_buffer = false;
         continue;
      }

      if (!mgr->caps.user_vertex_buffers && vb->is_user_buffer) {
         user_vb_mask |= 1 << dst_index;
         real_vb->buffer_offset = vb->buffer_offset;
         real_vb->stride = vb->stride;
         pipe_vertex_buffer_unreference(real_vb);
         real_vb->is_user_buffer = false;
         continue;
      }

      pipe_vertex_buffer_reference(real_vb, vb);
   }

   mgr->user_vb_mask |= user_vb_mask;
   mgr->incompatible_vb_mask |= incompatible_vb_mask;
   mgr->nonzero_stride_vb_mask |= nonzero_stride_vb_mask;
   mgr->enabled_vb_mask |= enabled_vb_mask;

   /* All changed buffers are marked as dirty, even the NULL ones,
    * which will cause the NULL buffers to be unbound in the driver later. */
   mgr->dirty_real_vb_mask |= ~mask;
}
void u_vbuf_set_index_buffer(struct u_vbuf *mgr,
                             const struct pipe_index_buffer *ib)
{
   struct pipe_context *pipe = mgr->pipe;

   if (ib) {
      assert(ib->offset % ib->index_size == 0);
      pipe_resource_reference(&mgr->index_buffer.buffer, ib->buffer);
      memcpy(&mgr->index_buffer, ib, sizeof(*ib));
   } else {
      pipe_resource_reference(&mgr->index_buffer.buffer, NULL);
   }

   pipe->set_index_buffer(pipe, ib);
}
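
/* Upload the ranges of the user vertex buffers that the current draw needs
 * (per-vertex, per-instance and constant attribs each contribute their own
 * range) into driver-visible buffers via the stream uploader. */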
static enum pipe_error
u_vbuf_upload_buffers(struct u_vbuf *mgr,
                      int start_vertex, unsigned num_vertices,
                      int start_instance, unsigned num_instances)
{
   unsigned i;
   unsigned nr_velems = mgr->ve->count;
   struct pipe_vertex_element *velems =
      mgr->using_translate ? mgr->fallback_velems : mgr->ve->ve;
   unsigned start_offset[PIPE_MAX_ATTRIBS];
   unsigned end_offset[PIPE_MAX_ATTRIBS];
   uint32_t buffer_mask = 0;

   /* Determine how much data needs to be uploaded. */
   for (i = 0; i < nr_velems; i++) {
      struct pipe_vertex_element *velem = &velems[i];
      unsigned index = velem->vertex_buffer_index;
      struct pipe_vertex_buffer *vb = &mgr->vertex_buffer[index];
      unsigned instance_div, first, size, index_bit;

      /* Skip the buffers generated by translate. */
      if (index == mgr->fallback_vbs[VB_VERTEX] ||
          index == mgr->fallback_vbs[VB_INSTANCE] ||
          index == mgr->fallback_vbs[VB_CONST]) {
         continue;
      }

      if (!vb->is_user_buffer) {
         continue;
      }

      instance_div = velem->instance_divisor;
      first = vb->buffer_offset + velem->src_offset;

      if (!vb->stride) {
         /* Constant attrib. */
         size = mgr->ve->src_format_size[i];
      } else if (instance_div) {
         /* Per-instance attrib. */
         unsigned count = (num_instances + instance_div - 1) / instance_div;
         first += vb->stride * start_instance;
         size = vb->stride * (count - 1) + mgr->ve->src_format_size[i];
      } else {
         /* Per-vertex attrib. */
         first += vb->stride * start_vertex;
         size = vb->stride * (num_vertices - 1) + mgr->ve->src_format_size[i];
      }

      index_bit = 1 << index;

      /* Update offsets. */
      if (!(buffer_mask & index_bit)) {
         start_offset[index] = first;
         end_offset[index] = first + size;
      } else {
         if (first < start_offset[index])
            start_offset[index] = first;
         if (first + size > end_offset[index])
            end_offset[index] = first + size;
      }

      buffer_mask |= index_bit;
   }

   /* Upload buffers. */
   while (buffer_mask) {
      unsigned start, end;
      struct pipe_vertex_buffer *real_vb;
      const uint8_t *ptr;

      i = u_bit_scan(&buffer_mask);

      start = start_offset[i];
      end = end_offset[i];
      assert(start < end);

      real_vb = &mgr->real_vertex_buffer[i];
      ptr = mgr->vertex_buffer[i].buffer.user;

      u_upload_data(mgr->pipe->stream_uploader, start, end - start, 4,
                    ptr + start, &real_vb->buffer_offset, &real_vb->buffer.resource);
      if (!real_vb->buffer.resource)
         return PIPE_ERROR_OUT_OF_MEMORY;

      real_vb->buffer_offset -= start;
   }

   return PIPE_OK;
}
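
/* The next two helpers answer, from the precomputed bitmasks alone, whether
 * the min/max index range must be computed for this draw and whether mapping
 * the source vertex buffers is likely to block on the GPU. */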
static boolean u_vbuf_need_minmax_index(const struct u_vbuf *mgr)
{
   /* See if there are any per-vertex attribs which will be uploaded or
    * translated. Use bitmasks to get the info instead of looping over vertex
    * elements. */
   return (mgr->ve->used_vb_mask &
           ((mgr->user_vb_mask |
             mgr->incompatible_vb_mask |
             mgr->ve->incompatible_vb_mask_any) &
            mgr->ve->noninstance_vb_mask_any &
            mgr->nonzero_stride_vb_mask)) != 0;
}

static boolean u_vbuf_mapping_vertex_buffer_blocks(const struct u_vbuf *mgr)
{
   /* Return true if there are hw buffers which don't need to be translated.
    *
    * We could query whether each buffer is busy, but that would
    * be way more costly than this. */
   return (mgr->ve->used_vb_mask &
           (~mgr->user_vb_mask &
            ~mgr->incompatible_vb_mask &
            mgr->ve->compatible_vb_mask_all &
            mgr->ve->noninstance_vb_mask_any &
            mgr->nonzero_stride_vb_mask)) != 0;
}
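
/* Scan the index buffer (8-, 16- or 32-bit indices) to find the smallest and
 * largest referenced vertex, optionally ignoring the primitive-restart
 * index. */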
static void u_vbuf_get_minmax_index(struct pipe_context *pipe,
                                    struct pipe_index_buffer *ib,
                                    boolean primitive_restart,
                                    unsigned restart_index,
                                    unsigned start, unsigned count,
                                    int *out_min_index,
                                    int *out_max_index)
{
   struct pipe_transfer *transfer = NULL;
   const void *indices;
   unsigned i;

   if (ib->user_buffer) {
      indices = (uint8_t*)ib->user_buffer +
                ib->offset + start * ib->index_size;
   } else {
      indices = pipe_buffer_map_range(pipe, ib->buffer,
                                      ib->offset + start * ib->index_size,
                                      count * ib->index_size,
                                      PIPE_TRANSFER_READ, &transfer);
   }

   switch (ib->index_size) {
   case 4: {
      const unsigned *ui_indices = (const unsigned*)indices;
      unsigned max_ui = 0;
      unsigned min_ui = ~0U;
      if (primitive_restart) {
         for (i = 0; i < count; i++) {
            if (ui_indices[i] != restart_index) {
               if (ui_indices[i] > max_ui) max_ui = ui_indices[i];
               if (ui_indices[i] < min_ui) min_ui = ui_indices[i];
            }
         }
      } else {
         for (i = 0; i < count; i++) {
            if (ui_indices[i] > max_ui) max_ui = ui_indices[i];
            if (ui_indices[i] < min_ui) min_ui = ui_indices[i];
         }
      }
      *out_min_index = min_ui;
      *out_max_index = max_ui;
      break;
   }
   case 2: {
      const unsigned short *us_indices = (const unsigned short*)indices;
      unsigned max_us = 0;
      unsigned min_us = ~0U;
      if (primitive_restart) {
         for (i = 0; i < count; i++) {
            if (us_indices[i] != restart_index) {
               if (us_indices[i] > max_us) max_us = us_indices[i];
               if (us_indices[i] < min_us) min_us = us_indices[i];
            }
         }
      } else {
         for (i = 0; i < count; i++) {
            if (us_indices[i] > max_us) max_us = us_indices[i];
            if (us_indices[i] < min_us) min_us = us_indices[i];
         }
      }
      *out_min_index = min_us;
      *out_max_index = max_us;
      break;
   }
   case 1: {
      const unsigned char *ub_indices = (const unsigned char*)indices;
      unsigned max_ub = 0;
      unsigned min_ub = ~0U;
      if (primitive_restart) {
         for (i = 0; i < count; i++) {
            if (ub_indices[i] != restart_index) {
               if (ub_indices[i] > max_ub) max_ub = ub_indices[i];
               if (ub_indices[i] < min_ub) min_ub = ub_indices[i];
            }
         }
      } else {
         for (i = 0; i < count; i++) {
            if (ub_indices[i] > max_ub) max_ub = ub_indices[i];
            if (ub_indices[i] < min_ub) min_ub = ub_indices[i];
         }
      }
      *out_min_index = min_ub;
      *out_max_index = max_ub;
      break;
   }
   default:
      assert(0);
      *out_min_index = 0;
      *out_max_index = 0;
   }

   if (transfer) {
      pipe_buffer_unmap(pipe, transfer);
   }
}
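
/* Bind the range of "real" vertex buffers covering all dirty slots to the
 * driver and clear the dirty mask. */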
static void u_vbuf_set_driver_vertex_buffers(struct u_vbuf *mgr)
{
   struct pipe_context *pipe = mgr->pipe;
   unsigned start_slot, count;

   start_slot = ffs(mgr->dirty_real_vb_mask) - 1;
   count = util_last_bit(mgr->dirty_real_vb_mask >> start_slot);

   pipe->set_vertex_buffers(pipe, start_slot, count,
                            mgr->real_vertex_buffer + start_slot);
   mgr->dirty_real_vb_mask = 0;
}
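
/* Drop-in replacement for pipe_context::draw_vbo. The fast path forwards the
 * draw untouched; otherwise the min/max index range is determined, user
 * buffers are uploaded, incompatible attribs are translated, and the draw is
 * re-emitted with the patched vertex state. */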
void u_vbuf_draw_vbo(struct u_vbuf *mgr, const struct pipe_draw_info *info)
{
   struct pipe_context *pipe = mgr->pipe;
   int start_vertex, min_index;
   unsigned num_vertices;
   boolean unroll_indices = FALSE;
   uint32_t used_vb_mask = mgr->ve->used_vb_mask;
   uint32_t user_vb_mask = mgr->user_vb_mask & used_vb_mask;
   uint32_t incompatible_vb_mask = mgr->incompatible_vb_mask & used_vb_mask;
   struct pipe_draw_info new_info;

   /* Normal draw. No fallback and no user buffers. */
   if (!incompatible_vb_mask &&
       !mgr->ve->incompatible_elem_mask &&
       !user_vb_mask) {

      /* Set vertex buffers if needed. */
      if (mgr->dirty_real_vb_mask & used_vb_mask) {
         u_vbuf_set_driver_vertex_buffers(mgr);
      }

      pipe->draw_vbo(pipe, info);
      return;
   }

   new_info = *info;

   /* Fallback. We need to know all the parameters. */
   if (new_info.indirect) {
      struct pipe_transfer *transfer = NULL;
      int *data;

      if (new_info.indexed) {
         data = pipe_buffer_map_range(pipe, new_info.indirect,
                                      new_info.indirect_offset, 20,
                                      PIPE_TRANSFER_READ, &transfer);
         new_info.index_bias = data[3];
         new_info.start_instance = data[4];
      } else {
         data = pipe_buffer_map_range(pipe, new_info.indirect,
                                      new_info.indirect_offset, 16,
                                      PIPE_TRANSFER_READ, &transfer);
         new_info.start_instance = data[3];
      }

      new_info.count = data[0];
      new_info.instance_count = data[1];
      new_info.start = data[2];
      pipe_buffer_unmap(pipe, transfer);
      new_info.indirect = NULL;
   }

   if (new_info.indexed) {
      /* See if anything needs to be done for per-vertex attribs. */
      if (u_vbuf_need_minmax_index(mgr)) {
         int max_index;

         if (new_info.max_index != ~0u) {
            min_index = new_info.min_index;
            max_index = new_info.max_index;
         } else {
            u_vbuf_get_minmax_index(mgr->pipe, &mgr->index_buffer,
                                    new_info.primitive_restart,
                                    new_info.restart_index, new_info.start,
                                    new_info.count, &min_index, &max_index);
         }

         assert(min_index <= max_index);

         start_vertex = min_index + new_info.index_bias;
         num_vertices = max_index + 1 - min_index;

         /* Primitive restart doesn't work when unrolling indices.
          * We would have to break this drawing operation into several ones. */
         /* Use some heuristic to see if unrolling indices improves
          * performance. */
         if (!new_info.primitive_restart &&
             num_vertices > new_info.count*2 &&
             num_vertices - new_info.count > 32 &&
             !u_vbuf_mapping_vertex_buffer_blocks(mgr)) {
            unroll_indices = TRUE;
            user_vb_mask &= ~(mgr->nonzero_stride_vb_mask &
                              mgr->ve->noninstance_vb_mask_any);
         }
      } else {
         /* Nothing to do for per-vertex attribs. */
         start_vertex = 0;
         num_vertices = 0;
         min_index = 0;
      }
   } else {
      start_vertex = new_info.start;
      num_vertices = new_info.count;
      min_index = 0;
   }

   /* Translate vertices with non-native layouts or formats. */
   if (unroll_indices ||
       incompatible_vb_mask ||
       mgr->ve->incompatible_elem_mask) {
      if (!u_vbuf_translate_begin(mgr, start_vertex, num_vertices,
                                  new_info.start_instance,
                                  new_info.instance_count, new_info.start,
                                  new_info.count, min_index, unroll_indices)) {
         debug_warn_once("u_vbuf_translate_begin() failed");
         return;
      }

      if (unroll_indices) {
         new_info.indexed = FALSE;
         new_info.index_bias = 0;
         new_info.min_index = 0;
         new_info.max_index = new_info.count - 1;
         new_info.start = 0;
      }

      user_vb_mask &= ~(incompatible_vb_mask |
                        mgr->ve->incompatible_vb_mask_all);
   }

   /* Upload user buffers. */
   if (user_vb_mask) {
      if (u_vbuf_upload_buffers(mgr, start_vertex, num_vertices,
                                new_info.start_instance,
                                new_info.instance_count) != PIPE_OK) {
         debug_warn_once("u_vbuf_upload_buffers() failed");
         return;
      }

      mgr->dirty_real_vb_mask |= user_vb_mask;
   }

   /*
   if (unroll_indices) {
      printf("unrolling indices: start_vertex = %i, num_vertices = %i\n",
             start_vertex, num_vertices);
      util_dump_draw_info(stdout, info);
   }

   for (i = 0; i < mgr->nr_vertex_buffers; i++) {
      printf("input %i: ", i);
      util_dump_vertex_buffer(stdout, mgr->vertex_buffer+i);
   }
   for (i = 0; i < mgr->nr_real_vertex_buffers; i++) {
      printf("real %i: ", i);
      util_dump_vertex_buffer(stdout, mgr->real_vertex_buffer+i);
   }
   */

   u_upload_unmap(pipe->stream_uploader);
   u_vbuf_set_driver_vertex_buffers(mgr);

   pipe->draw_vbo(pipe, &new_info);

   if (mgr->using_translate) {
      u_vbuf_translate_end(mgr);
   }
}
void u_vbuf_save_vertex_elements(struct u_vbuf *mgr)
{
   assert(!mgr->ve_saved);
   mgr->ve_saved = mgr->ve;
}

void u_vbuf_restore_vertex_elements(struct u_vbuf *mgr)
{
   if (mgr->ve != mgr->ve_saved) {
      struct pipe_context *pipe = mgr->pipe;

      mgr->ve = mgr->ve_saved;
      pipe->bind_vertex_elements_state(pipe,
                                       mgr->ve ? mgr->ve->driver_cso : NULL);
   }
   mgr->ve_saved = NULL;
}

void u_vbuf_save_aux_vertex_buffer_slot(struct u_vbuf *mgr)
{
   pipe_vertex_buffer_reference(&mgr->aux_vertex_buffer_saved,
                                &mgr->vertex_buffer[mgr->aux_vertex_buffer_slot]);
}

void u_vbuf_restore_aux_vertex_buffer_slot(struct u_vbuf *mgr)
{
   u_vbuf_set_vertex_buffers(mgr, mgr->aux_vertex_buffer_slot, 1,
                             &mgr->aux_vertex_buffer_saved);
   pipe_vertex_buffer_unreference(&mgr->aux_vertex_buffer_saved);
}