/**************************************************************************
 *
 * Copyright 2011 Marek Olšák <maraeo@gmail.com>
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL THE AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include "util/u_vbuf_mgr.h"

#include "util/u_format.h"
#include "util/u_inlines.h"
#include "util/u_memory.h"
#include "util/u_upload_mgr.h"
#include "translate/translate.h"
#include "translate/translate_cache.h"
struct u_vbuf_elements {
   unsigned count;
   struct pipe_vertex_element ve[PIPE_MAX_ATTRIBS];

   unsigned src_format_size[PIPE_MAX_ATTRIBS];

   /* If (velem[i].src_format != native_format[i]), the vertex buffer
    * referenced by the vertex element cannot be used for rendering and
    * its vertex data must be translated to native_format[i]. */
   enum pipe_format native_format[PIPE_MAX_ATTRIBS];
   unsigned native_format_size[PIPE_MAX_ATTRIBS];

   /* This might mean two things:
    * - src_format != native_format, as discussed above.
    * - src_offset % 4 != 0 (if the caps don't allow such an offset). */
   boolean incompatible_layout;
   /* Per-element flags. */
   boolean incompatible_layout_elem[PIPE_MAX_ATTRIBS];
};
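
/* A worked example, matching the FORMAT_REPLACE tables below: if the screen
 * cannot fetch PIPE_FORMAT_R16G16_FLOAT from a vertex buffer, an element
 * with that src_format gets native_format = PIPE_FORMAT_R32G32_FLOAT and
 * incompatible_layout_elem[i] = TRUE, which later triggers the translate
 * fallback for that element. */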
struct u_vbuf_priv {
   struct u_vbuf_mgr b;
   struct pipe_context *pipe;
   struct translate_cache *translate_cache;

   /* Vertex element state bound by the state tracker. */
   void *saved_ve;
   /* and its associated helper structure for this module. */
   struct u_vbuf_elements *ve;

   /* Vertex elements used for the translate fallback. */
   struct pipe_vertex_element fallback_velems[PIPE_MAX_ATTRIBS];
   /* If non-NULL, this is a vertex element state used for the translate
    * fallback and therefore used for rendering too. */
   void *fallback_ve;
   /* The vertex buffer slot index where translated vertices are stored. */
   unsigned fallback_vb_slot;
   /* When binding the fallback vertex element state, we don't want to
    * change saved_ve and ve. This is set to TRUE in such cases. */
   boolean ve_binding_lock;

   /* Whether there is any user buffer. */
   boolean any_user_vbs;
   /* Whether there is a buffer with a non-native layout. */
   boolean incompatible_vb_layout;
   /* Per-buffer flags. */
   boolean incompatible_vb[PIPE_MAX_ATTRIBS];
};
static void u_vbuf_init_format_caps(struct u_vbuf_priv *mgr)
{
   struct pipe_screen *screen = mgr->pipe->screen;

   mgr->b.caps.format_fixed32 =
      screen->is_format_supported(screen, PIPE_FORMAT_R32_FIXED, PIPE_BUFFER,
                                  0, PIPE_BIND_VERTEX_BUFFER);

   mgr->b.caps.format_float16 =
      screen->is_format_supported(screen, PIPE_FORMAT_R16_FLOAT, PIPE_BUFFER,
                                  0, PIPE_BIND_VERTEX_BUFFER);

   mgr->b.caps.format_float64 =
      screen->is_format_supported(screen, PIPE_FORMAT_R64_FLOAT, PIPE_BUFFER,
                                  0, PIPE_BIND_VERTEX_BUFFER);

   mgr->b.caps.format_norm32 =
      screen->is_format_supported(screen, PIPE_FORMAT_R32_UNORM, PIPE_BUFFER,
                                  0, PIPE_BIND_VERTEX_BUFFER) &&
      screen->is_format_supported(screen, PIPE_FORMAT_R32_SNORM, PIPE_BUFFER,
                                  0, PIPE_BIND_VERTEX_BUFFER);

   mgr->b.caps.format_scaled32 =
      screen->is_format_supported(screen, PIPE_FORMAT_R32_USCALED, PIPE_BUFFER,
                                  0, PIPE_BIND_VERTEX_BUFFER) &&
      screen->is_format_supported(screen, PIPE_FORMAT_R32_SSCALED, PIPE_BUFFER,
                                  0, PIPE_BIND_VERTEX_BUFFER);
}
struct u_vbuf_mgr *
u_vbuf_create(struct pipe_context *pipe,
              unsigned upload_buffer_size,
              unsigned upload_buffer_alignment,
              unsigned upload_buffer_bind,
              enum u_fetch_alignment fetch_alignment)
{
   struct u_vbuf_priv *mgr = CALLOC_STRUCT(u_vbuf_priv);

   mgr->pipe = pipe;
   mgr->translate_cache = translate_cache_create();
   mgr->fallback_vb_slot = ~0;

   mgr->b.uploader = u_upload_create(pipe, upload_buffer_size,
                                     upload_buffer_alignment,
                                     upload_buffer_bind);

   mgr->b.caps.fetch_dword_unaligned =
      fetch_alignment == U_VERTEX_FETCH_BYTE_ALIGNED;

   u_vbuf_init_format_caps(mgr);

   return &mgr->b;
}
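
/* A minimal usage sketch (hypothetical driver code; "drv_context" and its
 * "vbuf_mgr" field are illustrative, not part of this module):
 *
 *    struct drv_context *ctx = ...;
 *    ctx->vbuf_mgr = u_vbuf_create(pipe, 1024 * 1024, 4,
 *                                  PIPE_BIND_VERTEX_BUFFER,
 *                                  U_VERTEX_FETCH_DWORD_ALIGNED);
 *    ...
 *    u_vbuf_destroy(ctx->vbuf_mgr);
 */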
void u_vbuf_destroy(struct u_vbuf_mgr *mgrb)
{
   struct u_vbuf_priv *mgr = (struct u_vbuf_priv*)mgrb;
   unsigned i;

   for (i = 0; i < mgr->b.nr_vertex_buffers; i++) {
      pipe_resource_reference(&mgr->b.vertex_buffer[i].buffer, NULL);
   }
   for (i = 0; i < mgr->b.nr_real_vertex_buffers; i++) {
      pipe_resource_reference(&mgr->b.real_vertex_buffer[i].buffer, NULL);
   }

   translate_cache_destroy(mgr->translate_cache);
   u_upload_destroy(mgr->b.uploader);
   FREE(mgr);
}
static void
u_vbuf_translate_begin(struct u_vbuf_priv *mgr,
                       int min_index, int max_index)
{
   struct translate_key key;
   struct translate_element *te;
   unsigned tr_elem_index[PIPE_MAX_ATTRIBS];
   struct translate *tr;
   boolean vb_translated[PIPE_MAX_ATTRIBS] = {0};
   uint8_t *vb_map[PIPE_MAX_ATTRIBS] = {0}, *out_map;
   struct pipe_transfer *vb_transfer[PIPE_MAX_ATTRIBS] = {0};
   struct pipe_resource *out_buffer = NULL;
   unsigned i, num_verts, out_offset;
   boolean upload_flushed = FALSE;

   memset(&key, 0, sizeof(key));
   memset(tr_elem_index, 0xff, sizeof(tr_elem_index));

   /* Initialize the translate key, i.e. the recipe how vertices should be
    * translated. */
   for (i = 0; i < mgr->ve->count; i++) {
      enum pipe_format output_format = mgr->ve->native_format[i];
      unsigned output_format_size = mgr->ve->native_format_size[i];

      /* Check for support. */
      if (!mgr->ve->incompatible_layout_elem[i] &&
          !mgr->incompatible_vb[mgr->ve->ve[i].vertex_buffer_index]) {
         continue;
      }

      /* Workaround for translate: output floats instead of halfs. */
      switch (output_format) {
      case PIPE_FORMAT_R16_FLOAT:
         output_format = PIPE_FORMAT_R32_FLOAT;
         output_format_size = 4;
         break;
      case PIPE_FORMAT_R16G16_FLOAT:
         output_format = PIPE_FORMAT_R32G32_FLOAT;
         output_format_size = 8;
         break;
      case PIPE_FORMAT_R16G16B16_FLOAT:
         output_format = PIPE_FORMAT_R32G32B32_FLOAT;
         output_format_size = 12;
         break;
      case PIPE_FORMAT_R16G16B16A16_FLOAT:
         output_format = PIPE_FORMAT_R32G32B32A32_FLOAT;
         output_format_size = 16;
         break;
      default:;
      }

      /* Add this vertex element. */
      te = &key.element[key.nr_elements];
      /*te->type;
      te->instance_divisor;*/
      te->input_buffer = mgr->ve->ve[i].vertex_buffer_index;
      te->input_format = mgr->ve->ve[i].src_format;
      te->input_offset = mgr->ve->ve[i].src_offset;
      te->output_format = output_format;
      te->output_offset = key.output_stride;

      key.output_stride += output_format_size;
      vb_translated[mgr->ve->ve[i].vertex_buffer_index] = TRUE;
      tr_elem_index[i] = key.nr_elements;
      key.nr_elements++;
   }

   /* Get a translate object. */
   tr = translate_cache_find(mgr->translate_cache, &key);

   /* Map buffers we want to translate. */
   for (i = 0; i < mgr->b.nr_vertex_buffers; i++) {
      if (vb_translated[i]) {
         struct pipe_vertex_buffer *vb = &mgr->b.vertex_buffer[i];

         vb_map[i] = pipe_buffer_map(mgr->pipe, vb->buffer,
                                     PIPE_TRANSFER_READ, &vb_transfer[i]);

         tr->set_buffer(tr, i,
                        vb_map[i] + vb->buffer_offset + vb->stride * min_index,
                        vb->stride, ~0);
      }
   }

   /* Create and map the output buffer. */
   num_verts = max_index + 1 - min_index;

   u_upload_alloc(mgr->b.uploader,
                  key.output_stride * min_index,
                  key.output_stride * num_verts,
                  &out_offset, &out_buffer, &upload_flushed,
                  (void**)&out_map);

   out_offset -= key.output_stride * min_index;

   /* Translate. */
   tr->run(tr, 0, num_verts, 0, out_map);

   /* Unmap all buffers. */
   for (i = 0; i < mgr->b.nr_vertex_buffers; i++) {
      if (vb_translated[i]) {
         pipe_buffer_unmap(mgr->pipe, vb_transfer[i]);
      }
   }

   /* Setup the new vertex buffer in the first free slot. */
   mgr->fallback_vb_slot = ~0;
   for (i = 0; i < PIPE_MAX_ATTRIBS; i++) {
      if (!mgr->b.vertex_buffer[i].buffer) {
         mgr->fallback_vb_slot = i;

         if (i >= mgr->b.nr_vertex_buffers) {
            mgr->b.nr_real_vertex_buffers = i+1;
         }
         break;
      }
   }

   if (mgr->fallback_vb_slot != ~0) {
      /* Setup the new vertex buffer. */
      pipe_resource_reference(
            &mgr->b.real_vertex_buffer[mgr->fallback_vb_slot].buffer, out_buffer);
      mgr->b.real_vertex_buffer[mgr->fallback_vb_slot].buffer_offset = out_offset;
      mgr->b.real_vertex_buffer[mgr->fallback_vb_slot].stride = key.output_stride;

      /* Setup new vertex elements. */
      for (i = 0; i < mgr->ve->count; i++) {
         if (tr_elem_index[i] < key.nr_elements) {
            te = &key.element[tr_elem_index[i]];
            mgr->fallback_velems[i].instance_divisor = mgr->ve->ve[i].instance_divisor;
            mgr->fallback_velems[i].src_format = te->output_format;
            mgr->fallback_velems[i].src_offset = te->output_offset;
            mgr->fallback_velems[i].vertex_buffer_index = mgr->fallback_vb_slot;
         } else {
            memcpy(&mgr->fallback_velems[i], &mgr->ve->ve[i],
                   sizeof(struct pipe_vertex_element));
         }
      }

      mgr->fallback_ve =
         mgr->pipe->create_vertex_elements_state(mgr->pipe, mgr->ve->count,
                                                 mgr->fallback_velems);

      /* Preserve saved_ve. */
      mgr->ve_binding_lock = TRUE;
      mgr->pipe->bind_vertex_elements_state(mgr->pipe, mgr->fallback_ve);
      mgr->ve_binding_lock = FALSE;
   }

   pipe_resource_reference(&out_buffer, NULL);
}
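
/* Example of the key built above (a hypothetical scenario): if elements 0
 * and 1 are both incompatible, with output formats R32G32_FLOAT (8 bytes)
 * and R32_FLOAT (4 bytes), the translated vertices are interleaved as
 *
 *    element 0 -> output offset 0
 *    element 1 -> output offset 8
 *    key.output_stride = 12
 *
 * and all of them land in the single fallback vertex buffer slot. */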
static void u_vbuf_translate_end(struct u_vbuf_priv *mgr)
{
   if (mgr->fallback_ve == NULL) {
      return;
   }

   /* Restore vertex elements. */
   /* Note that saved_ve will be overwritten in bind_vertex_elements_state. */
   mgr->pipe->bind_vertex_elements_state(mgr->pipe, mgr->saved_ve);
   mgr->pipe->delete_vertex_elements_state(mgr->pipe, mgr->fallback_ve);
   mgr->fallback_ve = NULL;

   /* Delete the now-unused VBO. */
   pipe_resource_reference(&mgr->b.real_vertex_buffer[mgr->fallback_vb_slot].buffer,
                           NULL);
   mgr->fallback_vb_slot = ~0;
   mgr->b.nr_real_vertex_buffers = mgr->b.nr_vertex_buffers;
}
#define FORMAT_REPLACE(what, withwhat) \
    case PIPE_FORMAT_##what: format = PIPE_FORMAT_##withwhat; break
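
/* For example, FORMAT_REPLACE(R32_FIXED, R32_FLOAT) expands to
 *
 *    case PIPE_FORMAT_R32_FIXED: format = PIPE_FORMAT_R32_FLOAT; break
 *
 * so the switches below rewrite each unsupported source format in place. */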
struct u_vbuf_elements *
u_vbuf_create_vertex_elements(struct u_vbuf_mgr *mgrb,
                              unsigned count,
                              const struct pipe_vertex_element *attribs,
                              struct pipe_vertex_element *native_attribs)
{
   struct u_vbuf_priv *mgr = (struct u_vbuf_priv*)mgrb;
   unsigned i;
   struct u_vbuf_elements *ve = CALLOC_STRUCT(u_vbuf_elements);

   ve->count = count;

   if (!count) {
      return ve;
   }

   memcpy(ve->ve, attribs, sizeof(struct pipe_vertex_element) * count);
   memcpy(native_attribs, attribs, sizeof(struct pipe_vertex_element) * count);

   /* Set the best native format in case the original format is not
    * supported. */
   for (i = 0; i < count; i++) {
      enum pipe_format format = ve->ve[i].src_format;

      ve->src_format_size[i] = util_format_get_blocksize(format);

      /* Choose a native format.
       * For now we don't care about the alignment, that's going to
       * be sorted out later. */
      if (!mgr->b.caps.format_fixed32) {
         switch (format) {
            FORMAT_REPLACE(R32_FIXED,           R32_FLOAT);
            FORMAT_REPLACE(R32G32_FIXED,        R32G32_FLOAT);
            FORMAT_REPLACE(R32G32B32_FIXED,     R32G32B32_FLOAT);
            FORMAT_REPLACE(R32G32B32A32_FIXED,  R32G32B32A32_FLOAT);
            default:;
         }
      }
      if (!mgr->b.caps.format_float16) {
         switch (format) {
            FORMAT_REPLACE(R16_FLOAT,           R32_FLOAT);
            FORMAT_REPLACE(R16G16_FLOAT,        R32G32_FLOAT);
            FORMAT_REPLACE(R16G16B16_FLOAT,     R32G32B32_FLOAT);
            FORMAT_REPLACE(R16G16B16A16_FLOAT,  R32G32B32A32_FLOAT);
            default:;
         }
      }
      if (!mgr->b.caps.format_float64) {
         switch (format) {
            FORMAT_REPLACE(R64_FLOAT,           R32_FLOAT);
            FORMAT_REPLACE(R64G64_FLOAT,        R32G32_FLOAT);
            FORMAT_REPLACE(R64G64B64_FLOAT,     R32G32B32_FLOAT);
            FORMAT_REPLACE(R64G64B64A64_FLOAT,  R32G32B32A32_FLOAT);
            default:;
         }
      }
      if (!mgr->b.caps.format_norm32) {
         switch (format) {
            FORMAT_REPLACE(R32_UNORM,           R32_FLOAT);
            FORMAT_REPLACE(R32G32_UNORM,        R32G32_FLOAT);
            FORMAT_REPLACE(R32G32B32_UNORM,     R32G32B32_FLOAT);
            FORMAT_REPLACE(R32G32B32A32_UNORM,  R32G32B32A32_FLOAT);
            FORMAT_REPLACE(R32_SNORM,           R32_FLOAT);
            FORMAT_REPLACE(R32G32_SNORM,        R32G32_FLOAT);
            FORMAT_REPLACE(R32G32B32_SNORM,     R32G32B32_FLOAT);
            FORMAT_REPLACE(R32G32B32A32_SNORM,  R32G32B32A32_FLOAT);
            default:;
         }
      }
      if (!mgr->b.caps.format_scaled32) {
         switch (format) {
            FORMAT_REPLACE(R32_USCALED,         R32_FLOAT);
            FORMAT_REPLACE(R32G32_USCALED,      R32G32_FLOAT);
            FORMAT_REPLACE(R32G32B32_USCALED,   R32G32B32_FLOAT);
            FORMAT_REPLACE(R32G32B32A32_USCALED, R32G32B32A32_FLOAT);
            FORMAT_REPLACE(R32_SSCALED,         R32_FLOAT);
            FORMAT_REPLACE(R32G32_SSCALED,      R32G32_FLOAT);
            FORMAT_REPLACE(R32G32B32_SSCALED,   R32G32B32_FLOAT);
            FORMAT_REPLACE(R32G32B32A32_SSCALED, R32G32B32A32_FLOAT);
            default:;
         }
      }

      native_attribs[i].src_format = format;
      ve->native_format[i] = format;
      ve->native_format_size[i] =
          util_format_get_blocksize(ve->native_format[i]);

      ve->incompatible_layout_elem[i] =
            ve->ve[i].src_format != ve->native_format[i] ||
            (!mgr->b.caps.fetch_dword_unaligned && ve->ve[i].src_offset % 4 != 0);
      ve->incompatible_layout =
            ve->incompatible_layout ||
            ve->incompatible_layout_elem[i];
   }

   /* Align the formats to the size of DWORD if needed. */
   if (!mgr->b.caps.fetch_dword_unaligned) {
      for (i = 0; i < count; i++) {
         ve->native_format_size[i] = align(ve->native_format_size[i], 4);
      }
   }

   return ve;
}
void u_vbuf_bind_vertex_elements(struct u_vbuf_mgr *mgrb,
                                 void *cso,
                                 struct u_vbuf_elements *ve)
{
   struct u_vbuf_priv *mgr = (struct u_vbuf_priv*)mgrb;

   if (!cso) {
      return;
   }

   if (!mgr->ve_binding_lock) {
      mgr->saved_ve = cso;
      mgr->ve = ve;
   }
}

void u_vbuf_destroy_vertex_elements(struct u_vbuf_mgr *mgr,
                                    struct u_vbuf_elements *ve)
{
   FREE(ve);
}
void u_vbuf_set_vertex_buffers(struct u_vbuf_mgr *mgrb,
                               unsigned count,
                               const struct pipe_vertex_buffer *bufs)
{
   struct u_vbuf_priv *mgr = (struct u_vbuf_priv*)mgrb;
   unsigned i;

   mgr->any_user_vbs = FALSE;
   mgr->incompatible_vb_layout = FALSE;
   memset(mgr->incompatible_vb, 0, sizeof(mgr->incompatible_vb));

   if (!mgr->b.caps.fetch_dword_unaligned) {
      /* Check if the strides and offsets are aligned to the size of DWORD. */
      for (i = 0; i < count; i++) {
         if (bufs[i].buffer) {
            if (bufs[i].stride % 4 != 0 ||
                bufs[i].buffer_offset % 4 != 0) {
               mgr->incompatible_vb_layout = TRUE;
               mgr->incompatible_vb[i] = TRUE;
            }
         }
      }
   }

   for (i = 0; i < count; i++) {
      const struct pipe_vertex_buffer *vb = &bufs[i];

      pipe_resource_reference(&mgr->b.vertex_buffer[i].buffer, vb->buffer);

      mgr->b.real_vertex_buffer[i].buffer_offset =
         mgr->b.vertex_buffer[i].buffer_offset = vb->buffer_offset;

      mgr->b.real_vertex_buffer[i].stride =
         mgr->b.vertex_buffer[i].stride = vb->stride;

      if (!vb->buffer) {
         pipe_resource_reference(&mgr->b.real_vertex_buffer[i].buffer, NULL);
         continue;
      }

      if (u_vbuf_resource(vb->buffer)->user_ptr) {
         pipe_resource_reference(&mgr->b.real_vertex_buffer[i].buffer, NULL);
         mgr->any_user_vbs = TRUE;
         continue;
      }

      pipe_resource_reference(&mgr->b.real_vertex_buffer[i].buffer, vb->buffer);
   }

   for (i = count; i < mgr->b.nr_vertex_buffers; i++) {
      pipe_resource_reference(&mgr->b.vertex_buffer[i].buffer, NULL);
   }
   for (i = count; i < mgr->b.nr_real_vertex_buffers; i++) {
      pipe_resource_reference(&mgr->b.real_vertex_buffer[i].buffer, NULL);
   }

   mgr->b.nr_vertex_buffers = count;
   mgr->b.nr_real_vertex_buffers = count;
}
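
/* Note: for user buffers, real_vertex_buffer[i].buffer is left NULL above;
 * u_vbuf_upload_buffers() fills it in at draw time with the uploaded copy. */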
void u_vbuf_set_index_buffer(struct u_vbuf_mgr *mgr,
                             const struct pipe_index_buffer *ib)
{
   if (ib && ib->buffer) {
      assert(ib->offset % ib->index_size == 0);
      pipe_resource_reference(&mgr->index_buffer.buffer, ib->buffer);
      mgr->index_buffer.offset = ib->offset;
      mgr->index_buffer.index_size = ib->index_size;
   } else {
      pipe_resource_reference(&mgr->index_buffer.buffer, NULL);
   }
}
static void
u_vbuf_upload_buffers(struct u_vbuf_priv *mgr,
                      int min_index, int max_index,
                      unsigned instance_count)
{
   unsigned i;
   unsigned count = max_index + 1 - min_index;
   unsigned nr_velems = mgr->ve->count;
   unsigned nr_vbufs = mgr->b.nr_vertex_buffers;
   struct pipe_vertex_element *velems =
         mgr->fallback_ve ? mgr->fallback_velems : mgr->ve->ve;
   unsigned start_offset[PIPE_MAX_ATTRIBS];
   unsigned end_offset[PIPE_MAX_ATTRIBS] = {0};

   /* Determine how much data needs to be uploaded. */
   for (i = 0; i < nr_velems; i++) {
      struct pipe_vertex_element *velem = &velems[i];
      unsigned index = velem->vertex_buffer_index;
      struct pipe_vertex_buffer *vb = &mgr->b.vertex_buffer[index];
      unsigned instance_div, first, size;

      /* Skip the buffer generated by translate. */
      if (index == mgr->fallback_vb_slot) {
         continue;
      }

      if (!u_vbuf_resource(vb->buffer)->user_ptr) {
         continue;
      }

      instance_div = velem->instance_divisor;
      first = vb->buffer_offset + velem->src_offset;

      if (!vb->stride) {
         /* Constant attrib. */
         size = mgr->ve->src_format_size[i];
      } else if (instance_div) {
         /* Per-instance attrib. */
         unsigned count = (instance_count + instance_div - 1) / instance_div;
         size = vb->stride * (count - 1) + mgr->ve->src_format_size[i];
      } else {
         /* Per-vertex attrib. */
         first += vb->stride * min_index;
         size = vb->stride * (count - 1) + mgr->ve->src_format_size[i];
      }

      /* Update offsets. */
      if (!end_offset[index]) {
         start_offset[index] = first;
         end_offset[index] = first + size;
      } else {
         if (first < start_offset[index])
            start_offset[index] = first;
         if (first + size > end_offset[index])
            end_offset[index] = first + size;
      }
   }

   /* Upload buffers. */
   for (i = 0; i < nr_vbufs; i++) {
      unsigned start, end = end_offset[i];
      boolean flushed;
      struct pipe_vertex_buffer *real_vb;
      uint8_t *ptr;

      if (!end) {
         continue;
      }

      start = start_offset[i];
      assert(start < end);

      real_vb = &mgr->b.real_vertex_buffer[i];
      ptr = u_vbuf_resource(mgr->b.vertex_buffer[i].buffer)->user_ptr;

      u_upload_data(mgr->b.uploader, start, end - start, ptr + start,
                    &real_vb->buffer_offset, &real_vb->buffer, &flushed);

      real_vb->buffer_offset -= start;
   }
}
unsigned u_vbuf_draw_max_vertex_count(struct u_vbuf_mgr *mgrb)
{
   struct u_vbuf_priv *mgr = (struct u_vbuf_priv*)mgrb;
   unsigned i, nr = mgr->ve->count;
   struct pipe_vertex_element *velems =
         mgr->fallback_ve ? mgr->fallback_velems : mgr->ve->ve;
   unsigned result = ~0;

   for (i = 0; i < nr; i++) {
      struct pipe_vertex_buffer *vb =
            &mgr->b.real_vertex_buffer[velems[i].vertex_buffer_index];
      unsigned size, max_count, value;

      /* We're not interested in constant and per-instance attribs. */
      if (!vb->buffer ||
          !vb->stride ||
          velems[i].instance_divisor) {
         continue;
      }

      size = vb->buffer->width0;

      /* Subtract buffer_offset. */
      value = vb->buffer_offset;
      if (value >= size) {
         return 0;
      }
      size -= value;

      /* Subtract src_offset. */
      value = velems[i].src_offset;
      if (value >= size) {
         return 0;
      }
      size -= value;

      /* Subtract format_size. */
      value = mgr->ve->native_format_size[i];
      if (value >= size) {
         return 0;
      }
      size -= value;

      /* Compute the max count. */
      max_count = 1 + size / vb->stride;
      result = MIN2(result, max_count);
   }

   return result;
}
static boolean u_vbuf_need_minmax_index(struct u_vbuf_priv *mgr)
{
   unsigned i, nr = mgr->ve->count;

   for (i = 0; i < nr; i++) {
      struct pipe_vertex_buffer *vb;
      unsigned index;

      /* Per-instance attribs don't need min/max_index. */
      if (mgr->ve->ve[i].instance_divisor) {
         continue;
      }

      index = mgr->ve->ve[i].vertex_buffer_index;
      vb = &mgr->b.vertex_buffer[index];

      /* Constant attribs don't need min/max_index. */
      if (!vb->stride) {
         continue;
      }

      /* Per-vertex attribs need min/max_index. */
      if (u_vbuf_resource(vb->buffer)->user_ptr ||
          mgr->ve->incompatible_layout_elem[i] ||
          mgr->incompatible_vb[index]) {
         return TRUE;
      }
   }

   return FALSE;
}
static void u_vbuf_get_minmax_index(struct pipe_context *pipe,
                                    struct pipe_index_buffer *ib,
                                    const struct pipe_draw_info *info,
                                    int *out_min_index,
                                    int *out_max_index)
{
   struct pipe_transfer *transfer = NULL;
   const void *indices;
   unsigned i;
   unsigned restart_index = info->restart_index;

   if (u_vbuf_resource(ib->buffer)->user_ptr) {
      indices = u_vbuf_resource(ib->buffer)->user_ptr +
                ib->offset + info->start * ib->index_size;
   } else {
      indices = pipe_buffer_map_range(pipe, ib->buffer,
                                      ib->offset + info->start * ib->index_size,
                                      info->count * ib->index_size,
                                      PIPE_TRANSFER_READ, &transfer);
   }

   switch (ib->index_size) {
   case 4: {
      const unsigned *ui_indices = (const unsigned*)indices;
      unsigned max_ui = 0;
      unsigned min_ui = ~0U;
      if (info->primitive_restart) {
         for (i = 0; i < info->count; i++) {
            if (ui_indices[i] != restart_index) {
               if (ui_indices[i] > max_ui) max_ui = ui_indices[i];
               if (ui_indices[i] < min_ui) min_ui = ui_indices[i];
            }
         }
      }
      else {
         for (i = 0; i < info->count; i++) {
            if (ui_indices[i] > max_ui) max_ui = ui_indices[i];
            if (ui_indices[i] < min_ui) min_ui = ui_indices[i];
         }
      }
      *out_min_index = min_ui;
      *out_max_index = max_ui;
      break;
   }
   case 2: {
      const unsigned short *us_indices = (const unsigned short*)indices;
      unsigned max_us = 0;
      unsigned min_us = ~0U;
      if (info->primitive_restart) {
         for (i = 0; i < info->count; i++) {
            if (us_indices[i] != restart_index) {
               if (us_indices[i] > max_us) max_us = us_indices[i];
               if (us_indices[i] < min_us) min_us = us_indices[i];
            }
         }
      }
      else {
         for (i = 0; i < info->count; i++) {
            if (us_indices[i] > max_us) max_us = us_indices[i];
            if (us_indices[i] < min_us) min_us = us_indices[i];
         }
      }
      *out_min_index = min_us;
      *out_max_index = max_us;
      break;
   }
   case 1: {
      const unsigned char *ub_indices = (const unsigned char*)indices;
      unsigned max_ub = 0;
      unsigned min_ub = ~0U;
      if (info->primitive_restart) {
         for (i = 0; i < info->count; i++) {
            if (ub_indices[i] != restart_index) {
               if (ub_indices[i] > max_ub) max_ub = ub_indices[i];
               if (ub_indices[i] < min_ub) min_ub = ub_indices[i];
            }
         }
      }
      else {
         for (i = 0; i < info->count; i++) {
            if (ub_indices[i] > max_ub) max_ub = ub_indices[i];
            if (ub_indices[i] < min_ub) min_ub = ub_indices[i];
         }
      }
      *out_min_index = min_ub;
      *out_max_index = max_ub;
      break;
   }
   default:
      assert(0);
      *out_min_index = 0;
      *out_max_index = 0;
   }

   if (transfer) {
      pipe_buffer_unmap(pipe, transfer);
   }
}
enum u_vbuf_return_flags
u_vbuf_draw_begin(struct u_vbuf_mgr *mgrb,
                  const struct pipe_draw_info *info)
{
   struct u_vbuf_priv *mgr = (struct u_vbuf_priv*)mgrb;
   int min_index, max_index;

   if (!mgr->incompatible_vb_layout &&
       !mgr->ve->incompatible_layout &&
       !mgr->any_user_vbs) {
      return 0;
   }

   if (info->indexed) {
      if (info->max_index != ~0) {
         min_index = info->min_index + info->index_bias;
         max_index = info->max_index + info->index_bias;
      } else if (u_vbuf_need_minmax_index(mgr)) {
         u_vbuf_get_minmax_index(mgr->pipe, &mgr->b.index_buffer, info,
                                 &min_index, &max_index);
         min_index += info->index_bias;
         max_index += info->index_bias;
      } else {
         min_index = 0;
         max_index = 0;
      }
   } else {
      min_index = info->start;
      max_index = info->start + info->count - 1;
   }

   /* Translate vertices with non-native layouts or formats. */
   if (mgr->incompatible_vb_layout || mgr->ve->incompatible_layout) {
      u_vbuf_translate_begin(mgr, min_index, max_index);
   }

   /* Upload user buffers. */
   if (mgr->any_user_vbs) {
      u_vbuf_upload_buffers(mgr, min_index, max_index, info->instance_count);
   }

   return U_VBUF_BUFFERS_UPDATED;
}
void u_vbuf_draw_end(struct u_vbuf_mgr *mgrb)
{
   struct u_vbuf_priv *mgr = (struct u_vbuf_priv*)mgrb;

   if (mgr->fallback_ve) {
      u_vbuf_translate_end(mgr);
   }
}