/**************************************************************************
 *
 * Copyright 2011 Marek Olšák <maraeo@gmail.com>
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/* This module uploads user buffers and translates the vertex buffers which
 * contain incompatible vertices (i.e. not supported by the driver/hardware)
 * into compatible ones, based on the Gallium CAPs.
 *
 * It does not upload index buffers.
 *
 * The module heavily uses bitmasks to represent per-buffer and
 * per-vertex-element flags to avoid looping over the list of buffers just
 * to see if there's a non-zero stride, or user buffer, or unsupported format,
 * etc.
 *
 * There are 3 categories of vertex elements, which are processed separately:
 * - per-vertex attribs (stride != 0, instance_divisor == 0)
 * - instanced attribs (stride != 0, instance_divisor > 0)
 * - constant attribs (stride == 0)
 *
 * All needed uploads and translations are performed every draw command, but
 * only the subset of vertices needed for that draw command is uploaded or
 * translated. (the module never translates whole buffers)
 *
 *
 * The module consists of two main parts:
 *
 *
 * 1) Translate (u_vbuf_translate_begin/end)
 *
 * This is pretty much a vertex fetch fallback. It translates vertices from
 * one vertex buffer to another in an unused vertex buffer slot. It does
 * whatever is needed to make the vertices readable by the hardware (changes
 * vertex formats and aligns offsets and strides). The translate module is
 * used here.
 *
 * Each of the 3 categories is translated to a separate buffer.
 * Only the [min_index, max_index] range is translated. For instanced attribs,
 * the range is [start_instance, start_instance+instance_count]. For constant
 * attribs, the range is [0, 1].
 *
 *
 * 2) User buffer uploading (u_vbuf_upload_buffers)
 *
 * Only the [min_index, max_index] range is uploaded (just like Translate)
 * with a single memcpy.
 *
 * This method works best for non-indexed draw operations, or for indexed
 * draw operations where the [min_index, max_index] range is not much bigger
 * than the vertex count.
 *
 * If the range is too big (e.g. one triangle with indices {0, 1, 10000}),
 * the per-vertex attribs are uploaded via the translate module, all packed
 * into one vertex buffer, and the indexed draw call is turned into
 * a non-indexed one in the process. This adds additional complexity
 * to the translate part, but it prevents bad apps from bringing your frame
 * rate down.
 *
 *
 * If there is nothing to do, it forwards every command to the driver.
 * The module also has its own CSO cache of vertex element states.
 */
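
/* Illustrative usage sketch (not part of this file): how a driver without
 * native user-buffer support might wire the manager in. All foo_* names
 * below are hypothetical; u_vbuf_get_caps, u_vbuf_create and u_vbuf_draw_vbo
 * are the real entry points implemented in this file.
 */
#if 0
static void foo_init_vbuf(struct foo_context *ctx)
{
   struct u_vbuf_caps caps;

   /* Returns TRUE if some fallback (upload/translation) may be needed. */
   if (u_vbuf_get_caps(ctx->base.screen, &caps, 0))
      ctx->vbuf = u_vbuf_create(&ctx->base, &caps);
}

static void foo_draw_vbo(struct pipe_context *pipe,
                         const struct pipe_draw_info *info)
{
   struct foo_context *ctx = (struct foo_context *)pipe;

   /* The manager uploads/translates as needed, binds the real vertex
    * buffers and then calls pipe->draw_vbo with a fixed-up draw info. */
   u_vbuf_draw_vbo(ctx->vbuf, info);
}
#endif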
#include "util/u_vbuf.h"

#include "util/u_dump.h"
#include "util/u_format.h"
#include "util/u_inlines.h"
#include "util/u_memory.h"
#include "util/u_upload_mgr.h"
#include "translate/translate.h"
#include "translate/translate_cache.h"
#include "cso_cache/cso_cache.h"
#include "cso_cache/cso_hash.h"
struct u_vbuf_elements {
   unsigned count;
   struct pipe_vertex_element ve[PIPE_MAX_ATTRIBS];

   unsigned src_format_size[PIPE_MAX_ATTRIBS];

   /* If (velem[i].src_format != native_format[i]), the vertex buffer
    * referenced by the vertex element cannot be used for rendering and
    * its vertex data must be translated to native_format[i]. */
   enum pipe_format native_format[PIPE_MAX_ATTRIBS];
   unsigned native_format_size[PIPE_MAX_ATTRIBS];

   /* Which buffers are used by the vertex element state. */
   uint32_t used_vb_mask;
   /* This might mean two things:
    * - src_format != native_format, as discussed above.
    * - src_offset % 4 != 0 (if the caps don't allow such an offset). */
   uint32_t incompatible_elem_mask; /* each bit describes a corresp. attrib */
   /* Which buffer has at least one vertex element referencing it
    * incompatible. */
   uint32_t incompatible_vb_mask_any;
   /* Which buffer has all vertex elements referencing it incompatible. */
   uint32_t incompatible_vb_mask_all;
   /* Which buffer has at least one vertex element referencing it
    * compatible. */
   uint32_t compatible_vb_mask_any;
   /* Which buffer has all vertex elements referencing it compatible. */
   uint32_t compatible_vb_mask_all;
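
   /* Worked example (illustrative, not from the original code): take two
    * enabled buffers, where buffer 0 is referenced by one compatible and one
    * incompatible vertex element and buffer 1 only by compatible ones. Then:
    *   incompatible_vb_mask_any = 0x1 (buffer 0 has an incompatible ref)
    *   incompatible_vb_mask_all = 0x0 (no buffer has only incompatible refs)
    *   compatible_vb_mask_any   = 0x3 (both buffers have a compatible ref)
    *   compatible_vb_mask_all   = 0x2 (only buffer 1 is referenced
    *                                   exclusively by compatible elements) */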
   /* Which buffer has at least one vertex element referencing it
    * non-instanced. */
   uint32_t noninstance_vb_mask_any;

   void *driver_cso;
};

enum {
   VB_VERTEX = 0,
   VB_INSTANCE = 1,
   VB_CONST = 2,
   VB_NUM = 3
};

struct u_vbuf {
   struct u_vbuf_caps caps;
   bool has_signed_vb_offset;

   struct pipe_context *pipe;
   struct translate_cache *translate_cache;
   struct cso_cache *cso_cache;

   /* This is what was set in set_vertex_buffers.
    * May contain user buffers. */
   struct pipe_vertex_buffer vertex_buffer[PIPE_MAX_ATTRIBS];
   uint32_t enabled_vb_mask;

   /* Saved vertex buffer. */
   struct pipe_vertex_buffer vertex_buffer0_saved;

   /* Vertex buffers for the driver.
    * There are usually no user buffers. */
   struct pipe_vertex_buffer real_vertex_buffer[PIPE_MAX_ATTRIBS];
   uint32_t dirty_real_vb_mask; /* which buffers are dirty since the last
                                   call of set_vertex_buffers */

   /* Vertex elements. */
   struct u_vbuf_elements *ve, *ve_saved;

   /* Vertex elements used for the translate fallback. */
   struct pipe_vertex_element fallback_velems[PIPE_MAX_ATTRIBS];
   /* True if the translate fallback's vertex element state is currently
    * bound and therefore used for rendering. */
   boolean using_translate;
   /* The vertex buffer slot index where translated vertices are stored. */
   unsigned fallback_vbs[VB_NUM];

   /* Which buffer is a user buffer. */
   uint32_t user_vb_mask; /* each bit describes a corresp. buffer */
   /* Which buffer is incompatible (unaligned). */
   uint32_t incompatible_vb_mask; /* each bit describes a corresp. buffer */
   /* Which buffer has a non-zero stride. */
   uint32_t nonzero_stride_vb_mask; /* each bit describes a corresp. buffer */
};
static void *
u_vbuf_create_vertex_elements(struct u_vbuf *mgr, unsigned count,
                              const struct pipe_vertex_element *attribs);
static void u_vbuf_delete_vertex_elements(struct u_vbuf *mgr, void *cso);
static const struct {
   enum pipe_format from, to;
} vbuf_format_fallbacks[] = {
   { PIPE_FORMAT_R32_FIXED,            PIPE_FORMAT_R32_FLOAT },
   { PIPE_FORMAT_R32G32_FIXED,         PIPE_FORMAT_R32G32_FLOAT },
   { PIPE_FORMAT_R32G32B32_FIXED,      PIPE_FORMAT_R32G32B32_FLOAT },
   { PIPE_FORMAT_R32G32B32A32_FIXED,   PIPE_FORMAT_R32G32B32A32_FLOAT },
   { PIPE_FORMAT_R16_FLOAT,            PIPE_FORMAT_R32_FLOAT },
   { PIPE_FORMAT_R16G16_FLOAT,         PIPE_FORMAT_R32G32_FLOAT },
   { PIPE_FORMAT_R16G16B16_FLOAT,      PIPE_FORMAT_R32G32B32_FLOAT },
   { PIPE_FORMAT_R16G16B16A16_FLOAT,   PIPE_FORMAT_R32G32B32A32_FLOAT },
   { PIPE_FORMAT_R64_FLOAT,            PIPE_FORMAT_R32_FLOAT },
   { PIPE_FORMAT_R64G64_FLOAT,         PIPE_FORMAT_R32G32_FLOAT },
   { PIPE_FORMAT_R64G64B64_FLOAT,      PIPE_FORMAT_R32G32B32_FLOAT },
   { PIPE_FORMAT_R64G64B64A64_FLOAT,   PIPE_FORMAT_R32G32B32A32_FLOAT },
   { PIPE_FORMAT_R32_UNORM,            PIPE_FORMAT_R32_FLOAT },
   { PIPE_FORMAT_R32G32_UNORM,         PIPE_FORMAT_R32G32_FLOAT },
   { PIPE_FORMAT_R32G32B32_UNORM,      PIPE_FORMAT_R32G32B32_FLOAT },
   { PIPE_FORMAT_R32G32B32A32_UNORM,   PIPE_FORMAT_R32G32B32A32_FLOAT },
   { PIPE_FORMAT_R32_SNORM,            PIPE_FORMAT_R32_FLOAT },
   { PIPE_FORMAT_R32G32_SNORM,         PIPE_FORMAT_R32G32_FLOAT },
   { PIPE_FORMAT_R32G32B32_SNORM,      PIPE_FORMAT_R32G32B32_FLOAT },
   { PIPE_FORMAT_R32G32B32A32_SNORM,   PIPE_FORMAT_R32G32B32A32_FLOAT },
   { PIPE_FORMAT_R32_USCALED,          PIPE_FORMAT_R32_FLOAT },
   { PIPE_FORMAT_R32G32_USCALED,       PIPE_FORMAT_R32G32_FLOAT },
   { PIPE_FORMAT_R32G32B32_USCALED,    PIPE_FORMAT_R32G32B32_FLOAT },
   { PIPE_FORMAT_R32G32B32A32_USCALED, PIPE_FORMAT_R32G32B32A32_FLOAT },
   { PIPE_FORMAT_R32_SSCALED,          PIPE_FORMAT_R32_FLOAT },
   { PIPE_FORMAT_R32G32_SSCALED,       PIPE_FORMAT_R32G32_FLOAT },
   { PIPE_FORMAT_R32G32B32_SSCALED,    PIPE_FORMAT_R32G32B32_FLOAT },
   { PIPE_FORMAT_R32G32B32A32_SSCALED, PIPE_FORMAT_R32G32B32A32_FLOAT },
   { PIPE_FORMAT_R16_UNORM,            PIPE_FORMAT_R32_FLOAT },
   { PIPE_FORMAT_R16G16_UNORM,         PIPE_FORMAT_R32G32_FLOAT },
   { PIPE_FORMAT_R16G16B16_UNORM,      PIPE_FORMAT_R32G32B32_FLOAT },
   { PIPE_FORMAT_R16G16B16A16_UNORM,   PIPE_FORMAT_R32G32B32A32_FLOAT },
   { PIPE_FORMAT_R16_SNORM,            PIPE_FORMAT_R32_FLOAT },
   { PIPE_FORMAT_R16G16_SNORM,         PIPE_FORMAT_R32G32_FLOAT },
   { PIPE_FORMAT_R16G16B16_SNORM,      PIPE_FORMAT_R32G32B32_FLOAT },
   { PIPE_FORMAT_R16G16B16A16_SNORM,   PIPE_FORMAT_R32G32B32A32_FLOAT },
   { PIPE_FORMAT_R16_USCALED,          PIPE_FORMAT_R32_FLOAT },
   { PIPE_FORMAT_R16G16_USCALED,       PIPE_FORMAT_R32G32_FLOAT },
   { PIPE_FORMAT_R16G16B16_USCALED,    PIPE_FORMAT_R32G32B32_FLOAT },
   { PIPE_FORMAT_R16G16B16A16_USCALED, PIPE_FORMAT_R32G32B32A32_FLOAT },
   { PIPE_FORMAT_R16_SSCALED,          PIPE_FORMAT_R32_FLOAT },
   { PIPE_FORMAT_R16G16_SSCALED,       PIPE_FORMAT_R32G32_FLOAT },
   { PIPE_FORMAT_R16G16B16_SSCALED,    PIPE_FORMAT_R32G32B32_FLOAT },
   { PIPE_FORMAT_R16G16B16A16_SSCALED, PIPE_FORMAT_R32G32B32A32_FLOAT },
   { PIPE_FORMAT_R8_UNORM,             PIPE_FORMAT_R32_FLOAT },
   { PIPE_FORMAT_R8G8_UNORM,           PIPE_FORMAT_R32G32_FLOAT },
   { PIPE_FORMAT_R8G8B8_UNORM,         PIPE_FORMAT_R32G32B32_FLOAT },
   { PIPE_FORMAT_R8G8B8A8_UNORM,       PIPE_FORMAT_R32G32B32A32_FLOAT },
   { PIPE_FORMAT_R8_SNORM,             PIPE_FORMAT_R32_FLOAT },
   { PIPE_FORMAT_R8G8_SNORM,           PIPE_FORMAT_R32G32_FLOAT },
   { PIPE_FORMAT_R8G8B8_SNORM,         PIPE_FORMAT_R32G32B32_FLOAT },
   { PIPE_FORMAT_R8G8B8A8_SNORM,       PIPE_FORMAT_R32G32B32A32_FLOAT },
   { PIPE_FORMAT_R8_USCALED,           PIPE_FORMAT_R32_FLOAT },
   { PIPE_FORMAT_R8G8_USCALED,         PIPE_FORMAT_R32G32_FLOAT },
   { PIPE_FORMAT_R8G8B8_USCALED,       PIPE_FORMAT_R32G32B32_FLOAT },
   { PIPE_FORMAT_R8G8B8A8_USCALED,     PIPE_FORMAT_R32G32B32A32_FLOAT },
   { PIPE_FORMAT_R8_SSCALED,           PIPE_FORMAT_R32_FLOAT },
   { PIPE_FORMAT_R8G8_SSCALED,         PIPE_FORMAT_R32G32_FLOAT },
   { PIPE_FORMAT_R8G8B8_SSCALED,       PIPE_FORMAT_R32G32B32_FLOAT },
   { PIPE_FORMAT_R8G8B8A8_SSCALED,     PIPE_FORMAT_R32G32B32A32_FLOAT },
};
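
/* Example (illustrative): if the screen reports that R16G16B16_FLOAT is not
 * supported for vertex fetch, u_vbuf_get_caps() below records
 * format_translation[PIPE_FORMAT_R16G16B16_FLOAT] =
 * PIPE_FORMAT_R32G32B32_FLOAT, and the translate fallback expands such
 * attributes to floats at draw time. */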
boolean u_vbuf_get_caps(struct pipe_screen *screen, struct u_vbuf_caps *caps,
                        unsigned flags)
{
   unsigned i;
   boolean fallback = FALSE;

   /* I'd rather have a bitfield of which formats are supported and a static
    * table of the translations indexed by format, but since we don't have C99
    * we can't easily make a sparsely-populated table indexed by format. So,
    * we construct the sparse table here. */
   for (i = 0; i < PIPE_FORMAT_COUNT; i++)
      caps->format_translation[i] = i;

   for (i = 0; i < ARRAY_SIZE(vbuf_format_fallbacks); i++) {
      enum pipe_format format = vbuf_format_fallbacks[i].from;

      if (!screen->is_format_supported(screen, format, PIPE_BUFFER, 0, 0,
                                       PIPE_BIND_VERTEX_BUFFER)) {
         caps->format_translation[format] = vbuf_format_fallbacks[i].to;
         fallback = TRUE;
      }
   }

   caps->buffer_offset_unaligned =
      !screen->get_param(screen,
                         PIPE_CAP_VERTEX_BUFFER_OFFSET_4BYTE_ALIGNED_ONLY);
   caps->buffer_stride_unaligned =
      !screen->get_param(screen,
                         PIPE_CAP_VERTEX_BUFFER_STRIDE_4BYTE_ALIGNED_ONLY);
   caps->velem_src_offset_unaligned =
      !screen->get_param(screen,
                         PIPE_CAP_VERTEX_ELEMENT_SRC_OFFSET_4BYTE_ALIGNED_ONLY);
   caps->user_vertex_buffers =
      screen->get_param(screen, PIPE_CAP_USER_VERTEX_BUFFERS);

   if (!caps->buffer_offset_unaligned ||
       !caps->buffer_stride_unaligned ||
       !caps->velem_src_offset_unaligned ||
       (!(flags & U_VBUF_FLAG_NO_USER_VBOS) && !caps->user_vertex_buffers)) {
      fallback = TRUE;
   }

   return fallback;
}
struct u_vbuf *
u_vbuf_create(struct pipe_context *pipe, struct u_vbuf_caps *caps)
{
   struct u_vbuf *mgr = CALLOC_STRUCT(u_vbuf);

   mgr->caps = *caps;
   mgr->pipe = pipe;
   mgr->cso_cache = cso_cache_create();
   mgr->translate_cache = translate_cache_create();
   memset(mgr->fallback_vbs, ~0, sizeof(mgr->fallback_vbs));

   mgr->has_signed_vb_offset =
      pipe->screen->get_param(pipe->screen,
                              PIPE_CAP_SIGNED_VERTEX_BUFFER_OFFSET);

   return mgr;
}
/* u_vbuf uses its own caching for vertex elements, because it needs to keep
 * its own preprocessed state per vertex element CSO. */
static struct u_vbuf_elements *
u_vbuf_set_vertex_elements_internal(struct u_vbuf *mgr, unsigned count,
                                    const struct pipe_vertex_element *states)
{
   struct pipe_context *pipe = mgr->pipe;
   unsigned key_size, hash_key;
   struct cso_hash_iter iter;
   struct u_vbuf_elements *ve;
   struct cso_velems_state velems_state;

   /* need to include the count into the stored state data too. */
   key_size = sizeof(struct pipe_vertex_element) * count + sizeof(unsigned);
   velems_state.count = count;
   memcpy(velems_state.velems, states,
          sizeof(struct pipe_vertex_element) * count);
   hash_key = cso_construct_key((void*)&velems_state, key_size);
   iter = cso_find_state_template(mgr->cso_cache, hash_key, CSO_VELEMENTS,
                                  (void*)&velems_state, key_size);

   if (cso_hash_iter_is_null(iter)) {
      struct cso_velements *cso = MALLOC_STRUCT(cso_velements);
      memcpy(&cso->state, &velems_state, key_size);
      cso->data = u_vbuf_create_vertex_elements(mgr, count, states);
      cso->delete_state = (cso_state_callback)u_vbuf_delete_vertex_elements;
      cso->context = (void*)mgr;

      iter = cso_insert_state(mgr->cso_cache, hash_key, CSO_VELEMENTS, cso);
      ve = cso->data;
   } else {
      ve = ((struct cso_velements *)cso_hash_iter_data(iter))->data;
   }

   assert(ve);

   if (ve != mgr->ve)
      pipe->bind_vertex_elements_state(pipe, ve->driver_cso);

   return ve;
}
void u_vbuf_set_vertex_elements(struct u_vbuf *mgr, unsigned count,
                                const struct pipe_vertex_element *states)
{
   mgr->ve = u_vbuf_set_vertex_elements_internal(mgr, count, states);
}

void u_vbuf_destroy(struct u_vbuf *mgr)
{
   struct pipe_screen *screen = mgr->pipe->screen;
   unsigned i;
   const unsigned num_vb = screen->get_shader_param(screen, PIPE_SHADER_VERTEX,
                                                    PIPE_SHADER_CAP_MAX_INPUTS);

   mgr->pipe->set_vertex_buffers(mgr->pipe, 0, num_vb, NULL);

   for (i = 0; i < PIPE_MAX_ATTRIBS; i++)
      pipe_vertex_buffer_unreference(&mgr->vertex_buffer[i]);
   for (i = 0; i < PIPE_MAX_ATTRIBS; i++)
      pipe_vertex_buffer_unreference(&mgr->real_vertex_buffer[i]);

   pipe_vertex_buffer_unreference(&mgr->vertex_buffer0_saved);

   translate_cache_destroy(mgr->translate_cache);
   cso_cache_delete(mgr->cso_cache);
   FREE(mgr);
}
static enum pipe_error
u_vbuf_translate_buffers(struct u_vbuf *mgr, struct translate_key *key,
                         const struct pipe_draw_info *info,
                         unsigned vb_mask, unsigned out_vb,
                         int start_vertex, unsigned num_vertices,
                         int min_index, boolean unroll_indices)
{
   struct translate *tr;
   struct pipe_transfer *vb_transfer[PIPE_MAX_ATTRIBS] = {0};
   struct pipe_resource *out_buffer = NULL;
   uint8_t *out_map;
   unsigned out_offset, mask;

   /* Get a translate object. */
   tr = translate_cache_find(mgr->translate_cache, key);

   /* Map buffers we want to translate. */
   mask = vb_mask;
   while (mask) {
      struct pipe_vertex_buffer *vb;
      unsigned offset;
      uint8_t *map;
      unsigned i = u_bit_scan(&mask);

      vb = &mgr->vertex_buffer[i];
      offset = vb->buffer_offset + vb->stride * start_vertex;

      if (vb->is_user_buffer) {
         map = (uint8_t*)vb->buffer.user + offset;
      } else {
         unsigned size = vb->stride ? num_vertices * vb->stride
                                    : sizeof(double)*4;

         if (!vb->buffer.resource)
            continue;

         if (offset + size > vb->buffer.resource->width0) {
            /* Don't try to map past end of buffer. This often happens when
             * we're translating an attribute that's at offset > 0 from the
             * start of the vertex. If we'd subtract attrib's offset from
             * the size, this probably wouldn't happen. */
            size = vb->buffer.resource->width0 - offset;

            /* Also adjust num_vertices. A common user error is to call
             * glDrawRangeElements() with an incorrect 'end' argument. The
             * 'end' value should be the max index value, but people often
             * accidentally add one to this value. This adjustment avoids
             * crashing (by reading past the end of a hardware buffer mapping)
             * when people do that. */
            num_vertices = (size + vb->stride - 1) / vb->stride;
         }

         map = pipe_buffer_map_range(mgr->pipe, vb->buffer.resource, offset,
                                     size, PIPE_TRANSFER_READ,
                                     &vb_transfer[i]);
      }

      /* Subtract min_index so that indexing with the index buffer works. */
      if (unroll_indices) {
         map -= (ptrdiff_t)vb->stride * min_index;
      }

      tr->set_buffer(tr, i, map, vb->stride, info->max_index);
   }

   /* Translate. */
   if (unroll_indices) {
      struct pipe_transfer *transfer = NULL;
      const unsigned offset = info->start * info->index_size;
      uint8_t *map;

      /* Create and map the output buffer. */
      u_upload_alloc(mgr->pipe->stream_uploader, 0,
                     key->output_stride * info->count, 4,
                     &out_offset, &out_buffer,
                     (void**)&out_map);
      if (!out_buffer)
         return PIPE_ERROR_OUT_OF_MEMORY;

      if (info->has_user_indices) {
         map = (uint8_t*)info->index.user + offset;
      } else {
         map = pipe_buffer_map_range(mgr->pipe, info->index.resource, offset,
                                     info->count * info->index_size,
                                     PIPE_TRANSFER_READ, &transfer);
      }

      switch (info->index_size) {
      case 4:
         tr->run_elts(tr, (unsigned*)map, info->count, 0, 0, out_map);
         break;
      case 2:
         tr->run_elts16(tr, (uint16_t*)map, info->count, 0, 0, out_map);
         break;
      case 1:
         tr->run_elts8(tr, map, info->count, 0, 0, out_map);
         break;
      }

      if (transfer) {
         pipe_buffer_unmap(mgr->pipe, transfer);
      }
   } else {
      /* Create and map the output buffer. */
      u_upload_alloc(mgr->pipe->stream_uploader,
                     mgr->has_signed_vb_offset ?
                        0 : key->output_stride * start_vertex,
                     key->output_stride * num_vertices, 4,
                     &out_offset, &out_buffer,
                     (void**)&out_map);
      if (!out_buffer)
         return PIPE_ERROR_OUT_OF_MEMORY;

      out_offset -= key->output_stride * start_vertex;

      tr->run(tr, 0, num_vertices, 0, 0, out_map);
   }

   /* Unmap all buffers. */
   mask = vb_mask;
   while (mask) {
      unsigned i = u_bit_scan(&mask);

      if (vb_transfer[i]) {
         pipe_buffer_unmap(mgr->pipe, vb_transfer[i]);
      }
   }

   /* Setup the new vertex buffer. */
   mgr->real_vertex_buffer[out_vb].buffer_offset = out_offset;
   mgr->real_vertex_buffer[out_vb].stride = key->output_stride;

   /* Move the buffer reference. */
   pipe_vertex_buffer_unreference(&mgr->real_vertex_buffer[out_vb]);
   mgr->real_vertex_buffer[out_vb].buffer.resource = out_buffer;
   mgr->real_vertex_buffer[out_vb].is_user_buffer = false;

   return PIPE_OK;
}
static boolean
u_vbuf_translate_find_free_vb_slots(struct u_vbuf *mgr,
                                    unsigned mask[VB_NUM])
{
   unsigned type;
   unsigned fallback_vbs[VB_NUM];
   /* Set the bit for each buffer which is incompatible, or isn't set. */
   uint32_t unused_vb_mask =
      mgr->ve->incompatible_vb_mask_all | mgr->incompatible_vb_mask |
      ~mgr->enabled_vb_mask;

   memset(fallback_vbs, ~0, sizeof(fallback_vbs));

   /* Find free slots for each type if needed. */
   for (type = 0; type < VB_NUM; type++) {
      if (mask[type]) {
         uint32_t index;

         if (!unused_vb_mask) {
            return FALSE;
         }

         index = ffs(unused_vb_mask) - 1;
         fallback_vbs[type] = index;
         unused_vb_mask &= ~(1 << index);
         /*printf("found slot=%i for type=%i\n", index, type);*/
      }
   }

   for (type = 0; type < VB_NUM; type++) {
      if (mask[type]) {
         mgr->dirty_real_vb_mask |= 1 << fallback_vbs[type];
      }
   }

   memcpy(mgr->fallback_vbs, fallback_vbs, sizeof(fallback_vbs));
   return TRUE;
}
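
/* Worked example for u_vbuf_translate_find_free_vb_slots() above
 * (illustrative): with enabled_vb_mask = 0x3 and only buffer 0 incompatible,
 * unused_vb_mask = 0x1 | ~0x3, i.e. every slot except buffer 1. If only
 * per-vertex attribs need translating, ffs() picks slot 0 (the lowest free
 * bit) as the fallback vertex buffer for VB_VERTEX. */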
static boolean
u_vbuf_translate_begin(struct u_vbuf *mgr,
                       const struct pipe_draw_info *info,
                       int start_vertex, unsigned num_vertices,
                       int min_index, boolean unroll_indices)
{
   unsigned mask[VB_NUM] = {0};
   struct translate_key key[VB_NUM];
   unsigned elem_index[VB_NUM][PIPE_MAX_ATTRIBS]; /* ... into key.elements */
   unsigned i, type;
   const unsigned incompatible_vb_mask = mgr->incompatible_vb_mask &
                                         mgr->ve->used_vb_mask;

   const int start[VB_NUM] = {
      start_vertex,            /* VERTEX */
      info->start_instance,    /* INSTANCE */
      0                        /* CONST */
   };

   const unsigned num[VB_NUM] = {
      num_vertices,            /* VERTEX */
      info->instance_count,    /* INSTANCE */
      1                        /* CONST */
   };

   memset(key, 0, sizeof(key));
   memset(elem_index, ~0, sizeof(elem_index));

   /* See if there are vertex attribs of each type to translate and
    * which ones. */
   for (i = 0; i < mgr->ve->count; i++) {
      unsigned vb_index = mgr->ve->ve[i].vertex_buffer_index;

      if (!mgr->vertex_buffer[vb_index].stride) {
         if (!(mgr->ve->incompatible_elem_mask & (1 << i)) &&
             !(incompatible_vb_mask & (1 << vb_index))) {
            continue;
         }
         mask[VB_CONST] |= 1 << vb_index;
      } else if (mgr->ve->ve[i].instance_divisor) {
         if (!(mgr->ve->incompatible_elem_mask & (1 << i)) &&
             !(incompatible_vb_mask & (1 << vb_index))) {
            continue;
         }
         mask[VB_INSTANCE] |= 1 << vb_index;
      } else {
         if (!unroll_indices &&
             !(mgr->ve->incompatible_elem_mask & (1 << i)) &&
             !(incompatible_vb_mask & (1 << vb_index))) {
            continue;
         }
         mask[VB_VERTEX] |= 1 << vb_index;
      }
   }

   assert(mask[VB_VERTEX] || mask[VB_INSTANCE] || mask[VB_CONST]);

   /* Find free vertex buffer slots. */
   if (!u_vbuf_translate_find_free_vb_slots(mgr, mask)) {
      return FALSE;
   }

   /* Initialize the translate keys. */
   for (i = 0; i < mgr->ve->count; i++) {
      struct translate_key *k;
      struct translate_element *te;
      enum pipe_format output_format = mgr->ve->native_format[i];
      unsigned bit, vb_index = mgr->ve->ve[i].vertex_buffer_index;
      bit = 1 << vb_index;

      if (!(mgr->ve->incompatible_elem_mask & (1 << i)) &&
          !(incompatible_vb_mask & (1 << vb_index)) &&
          (!unroll_indices || !(mask[VB_VERTEX] & bit))) {
         continue;
      }

      /* Set type to what we will translate.
       * Whether vertex, instance, or constant attribs. */
      for (type = 0; type < VB_NUM; type++) {
         if (mask[type] & bit) {
            break;
         }
      }
      assert(type < VB_NUM);
      if (mgr->ve->ve[i].src_format != output_format)
         assert(translate_is_output_format_supported(output_format));
      /*printf("velem=%i type=%i\n", i, type);*/

      /* Add the vertex element. */
      k = &key[type];
      elem_index[type][i] = k->nr_elements;

      te = &k->element[k->nr_elements];
      te->type = TRANSLATE_ELEMENT_NORMAL;
      te->instance_divisor = 0;
      te->input_buffer = vb_index;
      te->input_format = mgr->ve->ve[i].src_format;
      te->input_offset = mgr->ve->ve[i].src_offset;
      te->output_format = output_format;
      te->output_offset = k->output_stride;

      k->output_stride += mgr->ve->native_format_size[i];
      k->nr_elements++;
   }

   /* Translate buffers. */
   for (type = 0; type < VB_NUM; type++) {
      if (key[type].nr_elements) {
         enum pipe_error err;
         err = u_vbuf_translate_buffers(mgr, &key[type], info, mask[type],
                                        mgr->fallback_vbs[type],
                                        start[type], num[type], min_index,
                                        unroll_indices && type == VB_VERTEX);
         if (err != PIPE_OK)
            return FALSE;

         /* Fixup the stride for constant attribs. */
         if (type == VB_CONST) {
            mgr->real_vertex_buffer[mgr->fallback_vbs[VB_CONST]].stride = 0;
         }
      }
   }

   /* Setup new vertex elements. */
   for (i = 0; i < mgr->ve->count; i++) {
      for (type = 0; type < VB_NUM; type++) {
         if (elem_index[type][i] < key[type].nr_elements) {
            struct translate_element *te = &key[type].element[elem_index[type][i]];
            mgr->fallback_velems[i].instance_divisor = mgr->ve->ve[i].instance_divisor;
            mgr->fallback_velems[i].src_format = te->output_format;
            mgr->fallback_velems[i].src_offset = te->output_offset;
            mgr->fallback_velems[i].vertex_buffer_index = mgr->fallback_vbs[type];

            /* elem_index[type][i] can only be set for one type. */
            assert(type > VB_INSTANCE || elem_index[type+1][i] == ~0u);
            assert(type > VB_VERTEX || elem_index[type+2][i] == ~0u);
            break;
         }
      }
      /* No translating, just copy the original vertex element over. */
      if (type == VB_NUM) {
         memcpy(&mgr->fallback_velems[i], &mgr->ve->ve[i],
                sizeof(struct pipe_vertex_element));
      }
   }

   u_vbuf_set_vertex_elements_internal(mgr, mgr->ve->count,
                                       mgr->fallback_velems);
   mgr->using_translate = TRUE;
   return TRUE;
}
static void u_vbuf_translate_end(struct u_vbuf *mgr)
{
   unsigned i;

   /* Restore vertex elements. */
   mgr->pipe->bind_vertex_elements_state(mgr->pipe, mgr->ve->driver_cso);
   mgr->using_translate = FALSE;

   /* Unreference the now-unused VBOs. */
   for (i = 0; i < VB_NUM; i++) {
      unsigned vb = mgr->fallback_vbs[i];

      if (vb != ~0u) {
         pipe_resource_reference(&mgr->real_vertex_buffer[vb].buffer.resource,
                                 NULL);
         mgr->fallback_vbs[i] = ~0;

         /* This will cause the buffer to be unbound in the driver later. */
         mgr->dirty_real_vb_mask |= 1 << vb;
      }
   }
}
static void *
u_vbuf_create_vertex_elements(struct u_vbuf *mgr, unsigned count,
                              const struct pipe_vertex_element *attribs)
{
   struct pipe_context *pipe = mgr->pipe;
   unsigned i;
   struct pipe_vertex_element driver_attribs[PIPE_MAX_ATTRIBS];
   struct u_vbuf_elements *ve = CALLOC_STRUCT(u_vbuf_elements);
   uint32_t used_buffers = 0;

   ve->count = count;

   memcpy(ve->ve, attribs, sizeof(struct pipe_vertex_element) * count);
   memcpy(driver_attribs, attribs, sizeof(struct pipe_vertex_element) * count);

   /* Set the best native format in case the original format is not
    * supported. */
   for (i = 0; i < count; i++) {
      enum pipe_format format = ve->ve[i].src_format;

      ve->src_format_size[i] = util_format_get_blocksize(format);

      used_buffers |= 1 << ve->ve[i].vertex_buffer_index;

      if (!ve->ve[i].instance_divisor) {
         ve->noninstance_vb_mask_any |= 1 << ve->ve[i].vertex_buffer_index;
      }

      format = mgr->caps.format_translation[format];

      driver_attribs[i].src_format = format;
      ve->native_format[i] = format;
      ve->native_format_size[i] =
         util_format_get_blocksize(ve->native_format[i]);

      if (ve->ve[i].src_format != format ||
          (!mgr->caps.velem_src_offset_unaligned &&
           ve->ve[i].src_offset % 4 != 0)) {
         ve->incompatible_elem_mask |= 1 << i;
         ve->incompatible_vb_mask_any |= 1 << ve->ve[i].vertex_buffer_index;
      } else {
         ve->compatible_vb_mask_any |= 1 << ve->ve[i].vertex_buffer_index;
      }
   }

   ve->used_vb_mask = used_buffers;
   ve->compatible_vb_mask_all = ~ve->incompatible_vb_mask_any & used_buffers;
   ve->incompatible_vb_mask_all = ~ve->compatible_vb_mask_any & used_buffers;

   /* Align the formats and offsets to the size of DWORD if needed. */
   if (!mgr->caps.velem_src_offset_unaligned) {
      for (i = 0; i < count; i++) {
         ve->native_format_size[i] = align(ve->native_format_size[i], 4);
         driver_attribs[i].src_offset = align(ve->ve[i].src_offset, 4);
      }
   }

   ve->driver_cso =
      pipe->create_vertex_elements_state(pipe, count, driver_attribs);
   return ve;
}
static void u_vbuf_delete_vertex_elements(struct u_vbuf *mgr, void *cso)
{
   struct pipe_context *pipe = mgr->pipe;
   struct u_vbuf_elements *ve = cso;

   pipe->delete_vertex_elements_state(pipe, ve->driver_cso);
   FREE(ve);
}
void u_vbuf_set_vertex_buffers(struct u_vbuf *mgr,
                               unsigned start_slot, unsigned count,
                               const struct pipe_vertex_buffer *bufs)
{
   unsigned i;
   /* which buffers are enabled */
   uint32_t enabled_vb_mask = 0;
   /* which buffers are in user memory */
   uint32_t user_vb_mask = 0;
   /* which buffers are incompatible with the driver */
   uint32_t incompatible_vb_mask = 0;
   /* which buffers have a non-zero stride */
   uint32_t nonzero_stride_vb_mask = 0;
   const uint32_t mask = ~(((1ull << count) - 1) << start_slot);

   /* Zero out the bits we are going to rewrite completely. */
   mgr->user_vb_mask &= mask;
   mgr->incompatible_vb_mask &= mask;
   mgr->nonzero_stride_vb_mask &= mask;
   mgr->enabled_vb_mask &= mask;

   if (!bufs) {
      struct pipe_context *pipe = mgr->pipe;
      /* Unbind. */
      mgr->dirty_real_vb_mask &= mask;

      for (i = 0; i < count; i++) {
         unsigned dst_index = start_slot + i;

         pipe_vertex_buffer_unreference(&mgr->vertex_buffer[dst_index]);
         pipe_vertex_buffer_unreference(&mgr->real_vertex_buffer[dst_index]);
      }

      pipe->set_vertex_buffers(pipe, start_slot, count, NULL);
      return;
   }

   for (i = 0; i < count; i++) {
      unsigned dst_index = start_slot + i;
      const struct pipe_vertex_buffer *vb = &bufs[i];
      struct pipe_vertex_buffer *orig_vb = &mgr->vertex_buffer[dst_index];
      struct pipe_vertex_buffer *real_vb = &mgr->real_vertex_buffer[dst_index];

      if (!vb->buffer.resource) {
         pipe_vertex_buffer_unreference(orig_vb);
         pipe_vertex_buffer_unreference(real_vb);
         continue;
      }

      pipe_vertex_buffer_reference(orig_vb, vb);

      if (vb->stride) {
         nonzero_stride_vb_mask |= 1 << dst_index;
      }
      enabled_vb_mask |= 1 << dst_index;

      if ((!mgr->caps.buffer_offset_unaligned && vb->buffer_offset % 4 != 0) ||
          (!mgr->caps.buffer_stride_unaligned && vb->stride % 4 != 0)) {
         incompatible_vb_mask |= 1 << dst_index;
         real_vb->buffer_offset = vb->buffer_offset;
         real_vb->stride = vb->stride;
         pipe_vertex_buffer_unreference(real_vb);
         real_vb->is_user_buffer = false;
         continue;
      }

      if (!mgr->caps.user_vertex_buffers && vb->is_user_buffer) {
         user_vb_mask |= 1 << dst_index;
         real_vb->buffer_offset = vb->buffer_offset;
         real_vb->stride = vb->stride;
         pipe_vertex_buffer_unreference(real_vb);
         real_vb->is_user_buffer = false;
         continue;
      }

      pipe_vertex_buffer_reference(real_vb, vb);
   }

   mgr->user_vb_mask |= user_vb_mask;
   mgr->incompatible_vb_mask |= incompatible_vb_mask;
   mgr->nonzero_stride_vb_mask |= nonzero_stride_vb_mask;
   mgr->enabled_vb_mask |= enabled_vb_mask;

   /* All changed buffers are marked as dirty, even the NULL ones,
    * which will cause the NULL buffers to be unbound in the driver later. */
   mgr->dirty_real_vb_mask |= ~mask;
}
static enum pipe_error
u_vbuf_upload_buffers(struct u_vbuf *mgr,
                      int start_vertex, unsigned num_vertices,
                      int start_instance, unsigned num_instances)
{
   unsigned i;
   unsigned nr_velems = mgr->ve->count;
   const struct pipe_vertex_element *velems =
      mgr->using_translate ? mgr->fallback_velems : mgr->ve->ve;
   unsigned start_offset[PIPE_MAX_ATTRIBS];
   unsigned end_offset[PIPE_MAX_ATTRIBS];
   uint32_t buffer_mask = 0;

   /* Determine how much data needs to be uploaded. */
   for (i = 0; i < nr_velems; i++) {
      const struct pipe_vertex_element *velem = &velems[i];
      unsigned index = velem->vertex_buffer_index;
      struct pipe_vertex_buffer *vb = &mgr->vertex_buffer[index];
      unsigned instance_div, first, size, index_bit;

      /* Skip the buffers generated by translate. */
      if (index == mgr->fallback_vbs[VB_VERTEX] ||
          index == mgr->fallback_vbs[VB_INSTANCE] ||
          index == mgr->fallback_vbs[VB_CONST]) {
         continue;
      }

      if (!vb->is_user_buffer) {
         continue;
      }

      instance_div = velem->instance_divisor;
      first = vb->buffer_offset + velem->src_offset;

      if (!vb->stride) {
         /* Constant attrib. */
         size = mgr->ve->src_format_size[i];
      } else if (instance_div) {
         /* Per-instance attrib. */

         /* Figure out how many instances we'll render given instance_div. We
          * can't use the typical div_round_up() pattern because the CTS uses
          * instance_div = ~0 for a test, which overflows div_round_up()'s
          * typical add. */
         unsigned count = num_instances / instance_div;
         if (count * instance_div != num_instances)
            count++;

         first += vb->stride * start_instance;
         size = vb->stride * (count - 1) + mgr->ve->src_format_size[i];
      } else {
         /* Per-vertex attrib. */
         first += vb->stride * start_vertex;
         size = vb->stride * (num_vertices - 1) + mgr->ve->src_format_size[i];
      }
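
      /* Worked example of the range merging below (illustrative): two
       * user-buffer elements in buffer 0 with src_offset 0 and 8, both
       * R32_FLOAT (4 bytes), stride 16, 100 vertices from vertex 0: the
       * first yields [0, 16*99 + 4) = [0, 1588), the second
       * [8, 8 + 1588) = [8, 1596). The merged range is [0, 1596), so one
       * copy covers both elements. */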
      index_bit = 1 << index;

      /* Update offsets. */
      if (!(buffer_mask & index_bit)) {
         start_offset[index] = first;
         end_offset[index] = first + size;
      } else {
         if (first < start_offset[index])
            start_offset[index] = first;
         if (first + size > end_offset[index])
            end_offset[index] = first + size;
      }

      buffer_mask |= index_bit;
   }

   /* Upload buffers. */
   while (buffer_mask) {
      unsigned start, end;
      struct pipe_vertex_buffer *real_vb;
      const uint8_t *ptr;

      i = u_bit_scan(&buffer_mask);

      start = start_offset[i];
      end = end_offset[i];
      assert(start < end);

      real_vb = &mgr->real_vertex_buffer[i];
      ptr = mgr->vertex_buffer[i].buffer.user;

      u_upload_data(mgr->pipe->stream_uploader,
                    mgr->has_signed_vb_offset ? 0 : start,
                    end - start, 4, ptr + start,
                    &real_vb->buffer_offset, &real_vb->buffer.resource);
      if (!real_vb->buffer.resource)
         return PIPE_ERROR_OUT_OF_MEMORY;

      real_vb->buffer_offset -= start;
   }

   return PIPE_OK;
}
static boolean u_vbuf_need_minmax_index(const struct u_vbuf *mgr)
{
   /* See if there are any per-vertex attribs which will be uploaded or
    * translated. Use bitmasks to get the info instead of looping over vertex
    * elements. */
   return (mgr->ve->used_vb_mask &
           ((mgr->user_vb_mask |
             mgr->incompatible_vb_mask |
             mgr->ve->incompatible_vb_mask_any) &
            mgr->ve->noninstance_vb_mask_any &
            mgr->nonzero_stride_vb_mask)) != 0;
}
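
/* Reading the mask expression in u_vbuf_need_minmax_index() above
 * (illustrative): a buffer forces the min/max index scan only if some
 * vertex element uses it, AND it must be uploaded (user buffer) or
 * translated (incompatible), AND it is fetched per-vertex (non-instanced,
 * non-zero stride) -- only then does the touched range depend on the
 * index values of the draw. */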
static boolean u_vbuf_mapping_vertex_buffer_blocks(const struct u_vbuf *mgr)
{
   /* Return true if there are hw buffers which don't need to be translated.
    *
    * We could query whether each buffer is busy, but that would
    * be way more costly than this. */
   return (mgr->ve->used_vb_mask &
           (~mgr->user_vb_mask &
            ~mgr->incompatible_vb_mask &
            mgr->ve->compatible_vb_mask_all &
            mgr->ve->noninstance_vb_mask_any &
            mgr->nonzero_stride_vb_mask)) != 0;
}
static void
u_vbuf_get_minmax_index_mapped(const struct pipe_draw_info *info,
                               const void *indices, unsigned *out_min_index,
                               unsigned *out_max_index)
{
   unsigned max = 0;
   unsigned min = ~0u;

   switch (info->index_size) {
   case 4: {
      const unsigned *ui_indices = (const unsigned*)indices;
      if (info->primitive_restart) {
         for (unsigned i = 0; i < info->count; i++) {
            if (ui_indices[i] != info->restart_index) {
               if (ui_indices[i] > max) max = ui_indices[i];
               if (ui_indices[i] < min) min = ui_indices[i];
            }
         }
      } else {
         for (unsigned i = 0; i < info->count; i++) {
            if (ui_indices[i] > max) max = ui_indices[i];
            if (ui_indices[i] < min) min = ui_indices[i];
         }
      }
      break;
   }
   case 2: {
      const unsigned short *us_indices = (const unsigned short*)indices;
      if (info->primitive_restart) {
         for (unsigned i = 0; i < info->count; i++) {
            if (us_indices[i] != info->restart_index) {
               if (us_indices[i] > max) max = us_indices[i];
               if (us_indices[i] < min) min = us_indices[i];
            }
         }
      } else {
         for (unsigned i = 0; i < info->count; i++) {
            if (us_indices[i] > max) max = us_indices[i];
            if (us_indices[i] < min) min = us_indices[i];
         }
      }
      break;
   }
   case 1: {
      const unsigned char *ub_indices = (const unsigned char*)indices;
      if (info->primitive_restart) {
         for (unsigned i = 0; i < info->count; i++) {
            if (ub_indices[i] != info->restart_index) {
               if (ub_indices[i] > max) max = ub_indices[i];
               if (ub_indices[i] < min) min = ub_indices[i];
            }
         }
      } else {
         for (unsigned i = 0; i < info->count; i++) {
            if (ub_indices[i] > max) max = ub_indices[i];
            if (ub_indices[i] < min) min = ub_indices[i];
         }
      }
      break;
   }
   default:
      assert(0);
   }

   *out_min_index = min;
   *out_max_index = max;
}
void u_vbuf_get_minmax_index(struct pipe_context *pipe,
                             const struct pipe_draw_info *info,
                             unsigned *out_min_index, unsigned *out_max_index)
{
   struct pipe_transfer *transfer = NULL;
   const void *indices;

   if (info->has_user_indices) {
      indices = (uint8_t*)info->index.user +
                info->start * info->index_size;
   } else {
      indices = pipe_buffer_map_range(pipe, info->index.resource,
                                      info->start * info->index_size,
                                      info->count * info->index_size,
                                      PIPE_TRANSFER_READ, &transfer);
   }

   u_vbuf_get_minmax_index_mapped(info, indices, out_min_index, out_max_index);

   if (transfer) {
      pipe_buffer_unmap(pipe, transfer);
   }
}
static void u_vbuf_set_driver_vertex_buffers(struct u_vbuf *mgr)
{
   struct pipe_context *pipe = mgr->pipe;
   unsigned start_slot, count;

   start_slot = ffs(mgr->dirty_real_vb_mask) - 1;
   count = util_last_bit(mgr->dirty_real_vb_mask >> start_slot);

   pipe->set_vertex_buffers(pipe, start_slot, count,
                            mgr->real_vertex_buffer + start_slot);
   mgr->dirty_real_vb_mask = 0;
}
static void
u_vbuf_split_indexed_multidraw(struct u_vbuf *mgr, struct pipe_draw_info *info,
                               unsigned *indirect_data, unsigned stride,
                               unsigned draw_count)
{
   assert(info->index_size);
   info->indirect = NULL;

   for (unsigned i = 0; i < draw_count; i++) {
      unsigned offset = i * stride / 4;

      info->count = indirect_data[offset + 0];
      info->instance_count = indirect_data[offset + 1];

      if (!info->count || !info->instance_count)
         continue;

      info->start = indirect_data[offset + 2];
      info->index_bias = indirect_data[offset + 3];
      info->start_instance = indirect_data[offset + 4];

      u_vbuf_draw_vbo(mgr, info);
   }
}
void u_vbuf_draw_vbo(struct u_vbuf *mgr, const struct pipe_draw_info *info)
{
   struct pipe_context *pipe = mgr->pipe;
   int start_vertex;
   unsigned min_index;
   unsigned num_vertices;
   boolean unroll_indices = FALSE;
   const uint32_t used_vb_mask = mgr->ve->used_vb_mask;
   uint32_t user_vb_mask = mgr->user_vb_mask & used_vb_mask;
   const uint32_t incompatible_vb_mask =
      mgr->incompatible_vb_mask & used_vb_mask;
   struct pipe_draw_info new_info;

   /* Normal draw. No fallback and no user buffers. */
   if (!incompatible_vb_mask &&
       !mgr->ve->incompatible_elem_mask &&
       !user_vb_mask) {

      /* Set vertex buffers if needed. */
      if (mgr->dirty_real_vb_mask & used_vb_mask) {
         u_vbuf_set_driver_vertex_buffers(mgr);
      }

      pipe->draw_vbo(pipe, info);
      return;
   }

   new_info = *info;

   /* Handle indirect (multi)draws. */
   if (new_info.indirect) {
      const struct pipe_draw_indirect_info *indirect = new_info.indirect;
      unsigned draw_count = 0;

      /* Get the number of draws. */
      if (indirect->indirect_draw_count) {
         pipe_buffer_read(pipe, indirect->indirect_draw_count,
                          indirect->indirect_draw_count_offset,
                          4, &draw_count);
      } else {
         draw_count = indirect->draw_count;
      }

      if (!draw_count)
         return;

      unsigned data_size = (draw_count - 1) * indirect->stride +
                           (new_info.index_size ? 20 : 16);
      unsigned *data = malloc(data_size);
      if (!data)
         return; /* report an error? */

      /* Read the used buffer range only once, because the read can be
       * uncached. */
      pipe_buffer_read(pipe, indirect->buffer, indirect->offset, data_size,
                       data);

      if (info->index_size) {
         /* Indexed multidraw. */
         unsigned index_bias0 = data[3];
         bool index_bias_same = true;

         /* If we invoke the translate path, we have to split the multidraw. */
         if (incompatible_vb_mask ||
             mgr->ve->incompatible_elem_mask) {
            u_vbuf_split_indexed_multidraw(mgr, &new_info, data,
                                           indirect->stride, draw_count);
            free(data);
            return;
         }

         /* See if index_bias is the same for all draws. */
         for (unsigned i = 1; i < draw_count; i++) {
            if (data[i * indirect->stride / 4 + 3] != index_bias0) {
               index_bias_same = false;
               break;
            }
         }

         /* Split the multidraw if index_bias is different. */
         if (!index_bias_same) {
            u_vbuf_split_indexed_multidraw(mgr, &new_info, data,
                                           indirect->stride, draw_count);
            free(data);
            return;
         }

         /* If we don't need to use the translate path and index_bias is
          * the same, we can process the multidraw with the time complexity
          * equal to 1 draw call (except for the index range computation).
          * We only need to compute the index range covering all draw calls
          * and the instance range covering all draw calls.
          *
          * The driver will not look at these values because indirect != NULL.
          * These values determine the user buffer bounds to upload. */
         new_info.index_bias = index_bias0;
         new_info.min_index = ~0u;
         new_info.max_index = 0;
         new_info.start_instance = ~0u;
         unsigned end_instance = 0;

         struct pipe_transfer *transfer = NULL;
         const uint8_t *indices;

         if (info->has_user_indices) {
            indices = (uint8_t*)info->index.user;
         } else {
            indices = (uint8_t*)pipe_buffer_map(pipe, info->index.resource,
                                                PIPE_TRANSFER_READ, &transfer);
         }

         for (unsigned i = 0; i < draw_count; i++) {
            unsigned offset = i * indirect->stride / 4;
            unsigned start = data[offset + 2];
            unsigned count = data[offset + 0];
            unsigned start_instance = data[offset + 4];
            unsigned instance_count = data[offset + 1];

            if (!count || !instance_count)
               continue;

            /* Update the ranges of instances. */
            new_info.start_instance = MIN2(new_info.start_instance,
                                           start_instance);
            end_instance = MAX2(end_instance, start_instance + instance_count);

            /* Update the index range. */
            unsigned min, max;
            new_info.count = count; /* only used by get_minmax_index */
            u_vbuf_get_minmax_index_mapped(&new_info,
                                           indices +
                                           new_info.index_size * start,
                                           &min, &max);

            new_info.min_index = MIN2(new_info.min_index, min);
            new_info.max_index = MAX2(new_info.max_index, max);
         }
         free(data);

         if (transfer)
            pipe_buffer_unmap(pipe, transfer);

         /* Set the final instance count. */
         new_info.instance_count = end_instance - new_info.start_instance;

         if (new_info.start_instance == ~0u || !new_info.instance_count)
            return;
      } else {
         /* Non-indexed multidraw.
          *
          * Keep the draw call indirect and compute minimums & maximums,
          * which will determine the user buffer bounds to upload, but
          * the driver will not look at these values because indirect != NULL.
          *
          * This efficiently processes the multidraw with the time complexity
          * equal to 1 draw call. */
         new_info.start = ~0u;
         new_info.start_instance = ~0u;
         unsigned end_vertex = 0;
         unsigned end_instance = 0;

         for (unsigned i = 0; i < draw_count; i++) {
            unsigned offset = i * indirect->stride / 4;
            unsigned start = data[offset + 2];
            unsigned count = data[offset + 0];
            unsigned start_instance = data[offset + 3];
            unsigned instance_count = data[offset + 1];

            new_info.start = MIN2(new_info.start, start);
            new_info.start_instance = MIN2(new_info.start_instance,
                                           start_instance);

            end_vertex = MAX2(end_vertex, start + count);
            end_instance = MAX2(end_instance, start_instance + instance_count);
         }
         free(data);

         /* Set the final counts. */
         new_info.count = end_vertex - new_info.start;
         new_info.instance_count = end_instance - new_info.start_instance;

         if (new_info.start == ~0u || !new_info.count ||
             !new_info.instance_count)
            return;
      }
   }

   if (new_info.index_size) {
      /* See if anything needs to be done for per-vertex attribs. */
      if (u_vbuf_need_minmax_index(mgr)) {
         unsigned max_index;

         if (new_info.max_index != ~0u) {
            min_index = new_info.min_index;
            max_index = new_info.max_index;
         } else {
            u_vbuf_get_minmax_index(mgr->pipe, &new_info,
                                    &min_index, &max_index);
         }

         assert(min_index <= max_index);

         start_vertex = min_index + new_info.index_bias;
         num_vertices = max_index + 1 - min_index;

         /* Primitive restart doesn't work when unrolling indices.
          * We would have to break this drawing operation into several ones. */
         /* Use some heuristic to see if unrolling indices improves
          * performance. */
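         /* Worked example of this heuristic (illustrative): a single
          * triangle with indices {0, 1, 10000} has count = 3 but
          * num_vertices = 10001; 10001 > 3*2 and 10001 - 3 > 32, so
          * (provided the other conditions hold) the 3 indexed vertices
          * are unrolled instead of uploading the whole 10001-vertex
          * range. */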
         if (!info->indirect &&
             !new_info.primitive_restart &&
             num_vertices > new_info.count*2 &&
             num_vertices - new_info.count > 32 &&
             !u_vbuf_mapping_vertex_buffer_blocks(mgr)) {
            unroll_indices = TRUE;
            user_vb_mask &= ~(mgr->nonzero_stride_vb_mask &
                              mgr->ve->noninstance_vb_mask_any);
         }
      } else {
         /* Nothing to do for per-vertex attribs. */
         start_vertex = 0;
         num_vertices = 0;
         min_index = 0;
      }
   } else {
      start_vertex = new_info.start;
      num_vertices = new_info.count;
      min_index = 0;
   }

   /* Translate vertices with non-native layouts or formats. */
   if (unroll_indices ||
       incompatible_vb_mask ||
       mgr->ve->incompatible_elem_mask) {
      if (!u_vbuf_translate_begin(mgr, &new_info, start_vertex, num_vertices,
                                  min_index, unroll_indices)) {
         debug_warn_once("u_vbuf_translate_begin() failed");
         return;
      }

      if (unroll_indices) {
         new_info.index_size = 0;
         new_info.index_bias = 0;
         new_info.min_index = 0;
         new_info.max_index = new_info.count - 1;
         new_info.start = 0;
      }

      user_vb_mask &= ~(incompatible_vb_mask |
                        mgr->ve->incompatible_vb_mask_all);
   }

   /* Upload user buffers. */
   if (user_vb_mask) {
      if (u_vbuf_upload_buffers(mgr, start_vertex, num_vertices,
                                new_info.start_instance,
                                new_info.instance_count) != PIPE_OK) {
         debug_warn_once("u_vbuf_upload_buffers() failed");
         return;
      }

      mgr->dirty_real_vb_mask |= user_vb_mask;
   }

   /*
   if (unroll_indices) {
      printf("unrolling indices: start_vertex = %i, num_vertices = %i\n",
             start_vertex, num_vertices);
      util_dump_draw_info(stdout, info);
      printf("\n");
   }

   unsigned i;
   for (i = 0; i < mgr->nr_vertex_buffers; i++) {
      printf("input %i: ", i);
      util_dump_vertex_buffer(stdout, mgr->vertex_buffer+i);
      printf("\n");
   }
   for (i = 0; i < mgr->nr_real_vertex_buffers; i++) {
      printf("real %i: ", i);
      util_dump_vertex_buffer(stdout, mgr->real_vertex_buffer+i);
      printf("\n");
   }
   */

   u_upload_unmap(pipe->stream_uploader);
   u_vbuf_set_driver_vertex_buffers(mgr);

   pipe->draw_vbo(pipe, &new_info);

   if (mgr->using_translate) {
      u_vbuf_translate_end(mgr);
   }
}
void u_vbuf_save_vertex_elements(struct u_vbuf *mgr)
{
   assert(!mgr->ve_saved);
   mgr->ve_saved = mgr->ve;
}

void u_vbuf_restore_vertex_elements(struct u_vbuf *mgr)
{
   if (mgr->ve != mgr->ve_saved) {
      struct pipe_context *pipe = mgr->pipe;

      mgr->ve = mgr->ve_saved;
      pipe->bind_vertex_elements_state(pipe,
                                       mgr->ve ? mgr->ve->driver_cso : NULL);
   }
   mgr->ve_saved = NULL;
}

void u_vbuf_save_vertex_buffer0(struct u_vbuf *mgr)
{
   pipe_vertex_buffer_reference(&mgr->vertex_buffer0_saved,
                                &mgr->vertex_buffer[0]);
}

void u_vbuf_restore_vertex_buffer0(struct u_vbuf *mgr)
{
   u_vbuf_set_vertex_buffers(mgr, 0, 1, &mgr->vertex_buffer0_saved);
   pipe_vertex_buffer_unreference(&mgr->vertex_buffer0_saved);
}