u_vbuf_mgr: dereference some pointers only once etc.
[mesa.git] / src / gallium / auxiliary / util / u_vbuf_mgr.c
/**************************************************************************
 *
 * Copyright 2011 Marek Olšák <maraeo@gmail.com>
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "util/u_vbuf_mgr.h"

#include "util/u_format.h"
#include "util/u_inlines.h"
#include "util/u_memory.h"
#include "util/u_upload_mgr.h"
#include "translate/translate.h"
#include "translate/translate_cache.h"

struct u_vbuf_elements {
   unsigned count;
   struct pipe_vertex_element ve[PIPE_MAX_ATTRIBS];

   unsigned src_format_size[PIPE_MAX_ATTRIBS];

   /* If (velem[i].src_format != native_format[i]), the vertex buffer
    * referenced by the vertex element cannot be used for rendering and
    * its vertex data must be translated to native_format[i]. */
   enum pipe_format native_format[PIPE_MAX_ATTRIBS];
   unsigned native_format_size[PIPE_MAX_ATTRIBS];

   /* Set for either of two reasons:
    * - src_format != native_format, as discussed above.
    * - src_offset % 4 != 0 (if the caps don't allow such an offset). */
   boolean incompatible_layout;
};

struct u_vbuf_priv {
   struct u_vbuf_mgr b;
   struct pipe_context *pipe;

   struct translate_cache *translate_cache;
   unsigned translate_vb_slot;

   struct u_vbuf_elements *ve;
   /* saved_ve is the vertex elements CSO last bound by the state tracker;
    * fallback_ve is the translated CSO bound while the translate fallback
    * is active. */
   void *saved_ve, *fallback_ve;
   /* Set while binding fallback_ve, so that the bind callback doesn't
    * overwrite saved_ve. */
   boolean ve_binding_lock;

   boolean any_user_vbs;
   boolean incompatible_vb_layout;
};

static void u_vbuf_init_format_caps(struct u_vbuf_priv *mgr)
{
   struct pipe_screen *screen = mgr->pipe->screen;

   mgr->b.caps.format_fixed32 =
      screen->is_format_supported(screen, PIPE_FORMAT_R32_FIXED, PIPE_BUFFER,
                                  0, PIPE_BIND_VERTEX_BUFFER);

   mgr->b.caps.format_float16 =
      screen->is_format_supported(screen, PIPE_FORMAT_R16_FLOAT, PIPE_BUFFER,
                                  0, PIPE_BIND_VERTEX_BUFFER);

   mgr->b.caps.format_float64 =
      screen->is_format_supported(screen, PIPE_FORMAT_R64_FLOAT, PIPE_BUFFER,
                                  0, PIPE_BIND_VERTEX_BUFFER);

   mgr->b.caps.format_norm32 =
      screen->is_format_supported(screen, PIPE_FORMAT_R32_UNORM, PIPE_BUFFER,
                                  0, PIPE_BIND_VERTEX_BUFFER) &&
      screen->is_format_supported(screen, PIPE_FORMAT_R32_SNORM, PIPE_BUFFER,
                                  0, PIPE_BIND_VERTEX_BUFFER);

   mgr->b.caps.format_scaled32 =
      screen->is_format_supported(screen, PIPE_FORMAT_R32_USCALED, PIPE_BUFFER,
                                  0, PIPE_BIND_VERTEX_BUFFER) &&
      screen->is_format_supported(screen, PIPE_FORMAT_R32_SSCALED, PIPE_BUFFER,
                                  0, PIPE_BIND_VERTEX_BUFFER);
}

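/* Create a vertex buffer manager. A driver typically calls this once at
 * context creation. A sketch (the size, alignment and fetch alignment
 * below are illustrative, not prescriptive):
 *
 *    mgr = u_vbuf_create(pipe, 1024 * 1024, 16, PIPE_BIND_VERTEX_BUFFER,
 *                        U_VERTEX_FETCH_DWORD_ALIGNED);
 *
 * Pass U_VERTEX_FETCH_BYTE_ALIGNED instead if the hardware can fetch
 * vertices from unaligned offsets. */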
struct u_vbuf_mgr *
u_vbuf_create(struct pipe_context *pipe,
              unsigned upload_buffer_size,
              unsigned upload_buffer_alignment,
              unsigned upload_buffer_bind,
              enum u_fetch_alignment fetch_alignment)
{
   struct u_vbuf_priv *mgr = CALLOC_STRUCT(u_vbuf_priv);

   mgr->pipe = pipe;
   mgr->translate_cache = translate_cache_create();

   mgr->b.uploader = u_upload_create(pipe, upload_buffer_size,
                                     upload_buffer_alignment,
                                     upload_buffer_bind);

   mgr->b.caps.fetch_dword_unaligned =
      fetch_alignment == U_VERTEX_FETCH_BYTE_ALIGNED;

   u_vbuf_init_format_caps(mgr);

   return &mgr->b;
}

void u_vbuf_destroy(struct u_vbuf_mgr *mgrb)
{
   struct u_vbuf_priv *mgr = (struct u_vbuf_priv*)mgrb;
   unsigned i;

   for (i = 0; i < mgr->b.nr_vertex_buffers; i++) {
      pipe_resource_reference(&mgr->b.vertex_buffer[i].buffer, NULL);
   }
   for (i = 0; i < mgr->b.nr_real_vertex_buffers; i++) {
      pipe_resource_reference(&mgr->b.real_vertex_buffer[i].buffer, NULL);
   }

   translate_cache_destroy(mgr->translate_cache);
   u_upload_destroy(mgr->b.uploader);
   FREE(mgr);
}

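/* Fallback for vertex layouts the hardware can't fetch directly: map the
 * incompatible vertex buffers, run a translate object that converts the
 * affected elements into a single interleaved buffer with native formats,
 * bind that buffer in a free slot, and bind a matching fallback vertex
 * elements state. u_vbuf_translate_end undoes all of this after the draw. */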
static void
u_vbuf_translate_begin(struct u_vbuf_priv *mgr,
                       int min_index, int max_index)
{
   struct translate_key key;
   struct translate_element *te;
   unsigned tr_elem_index[PIPE_MAX_ATTRIBS];
   struct translate *tr;
   boolean vb_translated[PIPE_MAX_ATTRIBS] = {0};
   uint8_t *vb_map[PIPE_MAX_ATTRIBS] = {0}, *out_map;
   struct pipe_transfer *vb_transfer[PIPE_MAX_ATTRIBS] = {0};
   struct pipe_resource *out_buffer = NULL;
   unsigned i, num_verts, out_offset;
   struct pipe_vertex_element new_velems[PIPE_MAX_ATTRIBS];
   boolean upload_flushed = FALSE;

   memset(&key, 0, sizeof(key));
   memset(tr_elem_index, 0xff, sizeof(tr_elem_index));

   /* Initialize the translate key, i.e. the recipe how vertices should be
    * translated. */
   for (i = 0; i < mgr->ve->count; i++) {
      struct pipe_vertex_buffer *vb =
         &mgr->b.vertex_buffer[mgr->ve->ve[i].vertex_buffer_index];
      enum pipe_format output_format = mgr->ve->native_format[i];
      unsigned output_format_size = mgr->ve->native_format_size[i];

      /* Check for support. */
      if (mgr->ve->ve[i].src_format == mgr->ve->native_format[i] &&
          (mgr->b.caps.fetch_dword_unaligned ||
           (vb->buffer_offset % 4 == 0 &&
            vb->stride % 4 == 0 &&
            mgr->ve->ve[i].src_offset % 4 == 0))) {
         continue;
      }

      /* Workaround for translate: output floats instead of halves. */
      switch (output_format) {
      case PIPE_FORMAT_R16_FLOAT:
         output_format = PIPE_FORMAT_R32_FLOAT;
         output_format_size = 4;
         break;
      case PIPE_FORMAT_R16G16_FLOAT:
         output_format = PIPE_FORMAT_R32G32_FLOAT;
         output_format_size = 8;
         break;
      case PIPE_FORMAT_R16G16B16_FLOAT:
         output_format = PIPE_FORMAT_R32G32B32_FLOAT;
         output_format_size = 12;
         break;
      case PIPE_FORMAT_R16G16B16A16_FLOAT:
         output_format = PIPE_FORMAT_R32G32B32A32_FLOAT;
         output_format_size = 16;
         break;
      default:;
      }

      /* Add this vertex element. */
      te = &key.element[key.nr_elements];
      /* te->type and te->instance_divisor are left at zero
       * (cleared by the memset of "key" above). */
      te->input_buffer = mgr->ve->ve[i].vertex_buffer_index;
      te->input_format = mgr->ve->ve[i].src_format;
      te->input_offset = mgr->ve->ve[i].src_offset;
      te->output_format = output_format;
      te->output_offset = key.output_stride;

      key.output_stride += output_format_size;
      vb_translated[mgr->ve->ve[i].vertex_buffer_index] = TRUE;
      tr_elem_index[i] = key.nr_elements;
      key.nr_elements++;
   }

   /* Get a translate object. */
   tr = translate_cache_find(mgr->translate_cache, &key);

   /* Map buffers we want to translate. */
   for (i = 0; i < mgr->b.nr_vertex_buffers; i++) {
      if (vb_translated[i]) {
         struct pipe_vertex_buffer *vb = &mgr->b.vertex_buffer[i];

         vb_map[i] = pipe_buffer_map(mgr->pipe, vb->buffer,
                                     PIPE_TRANSFER_READ, &vb_transfer[i]);

         tr->set_buffer(tr, i,
                        vb_map[i] + vb->buffer_offset + vb->stride * min_index,
                        vb->stride, ~0);
      }
   }

   /* Create and map the output buffer. */
   num_verts = max_index + 1 - min_index;

   u_upload_alloc(mgr->b.uploader,
                  key.output_stride * min_index,
                  key.output_stride * num_verts,
                  &out_offset, &out_buffer, &upload_flushed,
                  (void**)&out_map);

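   /* Only vertices [min_index, max_index] were translated, but the hardware
    * will fetch at index min_index and up, so bias the offset back by
    * stride * min_index. */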
   out_offset -= key.output_stride * min_index;

   /* Translate. */
   tr->run(tr, 0, num_verts, 0, out_map);

   /* Unmap all buffers. */
   for (i = 0; i < mgr->b.nr_vertex_buffers; i++) {
      if (vb_translated[i]) {
         pipe_buffer_unmap(mgr->pipe, vb_transfer[i]);
      }
   }

   /* Setup the new vertex buffer in the first free slot. */
   mgr->translate_vb_slot = ~0;
   for (i = 0; i < PIPE_MAX_ATTRIBS; i++) {
      if (!mgr->b.vertex_buffer[i].buffer) {
         mgr->translate_vb_slot = i;

         if (i >= mgr->b.nr_vertex_buffers) {
            mgr->b.nr_real_vertex_buffers = i+1;
         }
         break;
      }
   }

   if (mgr->translate_vb_slot != ~0) {
      /* Setup the new vertex buffer. */
      pipe_resource_reference(
         &mgr->b.real_vertex_buffer[mgr->translate_vb_slot].buffer, out_buffer);
      mgr->b.real_vertex_buffer[mgr->translate_vb_slot].buffer_offset = out_offset;
      mgr->b.real_vertex_buffer[mgr->translate_vb_slot].stride = key.output_stride;

      /* Setup new vertex elements. */
      for (i = 0; i < mgr->ve->count; i++) {
         if (tr_elem_index[i] < key.nr_elements) {
            te = &key.element[tr_elem_index[i]];
            new_velems[i].instance_divisor = mgr->ve->ve[i].instance_divisor;
            new_velems[i].src_format = te->output_format;
            new_velems[i].src_offset = te->output_offset;
            new_velems[i].vertex_buffer_index = mgr->translate_vb_slot;
         } else {
            memcpy(&new_velems[i], &mgr->ve->ve[i],
                   sizeof(struct pipe_vertex_element));
         }
      }

      mgr->fallback_ve =
         mgr->pipe->create_vertex_elements_state(mgr->pipe, mgr->ve->count,
                                                 new_velems);

      /* Preserve saved_ve. */
      mgr->ve_binding_lock = TRUE;
      mgr->pipe->bind_vertex_elements_state(mgr->pipe, mgr->fallback_ve);
      mgr->ve_binding_lock = FALSE;
   }

   pipe_resource_reference(&out_buffer, NULL);
}

static void u_vbuf_translate_end(struct u_vbuf_priv *mgr)
{
   if (mgr->fallback_ve == NULL) {
      return;
   }

   /* Restore vertex elements. */
   /* Note that saved_ve will be overwritten in bind_vertex_elements_state. */
   mgr->pipe->bind_vertex_elements_state(mgr->pipe, mgr->saved_ve);
   mgr->pipe->delete_vertex_elements_state(mgr->pipe, mgr->fallback_ve);
   mgr->fallback_ve = NULL;

   /* Delete the now-unused VBO. */
   pipe_resource_reference(&mgr->b.real_vertex_buffer[mgr->translate_vb_slot].buffer,
                           NULL);
   mgr->b.nr_real_vertex_buffers = mgr->b.nr_vertex_buffers;
}

#define FORMAT_REPLACE(what, withwhat) \
   case PIPE_FORMAT_##what: format = PIPE_FORMAT_##withwhat; break
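/* e.g. FORMAT_REPLACE(R32_FIXED, R32_FLOAT); expands to:
 *    case PIPE_FORMAT_R32_FIXED: format = PIPE_FORMAT_R32_FLOAT; break;
 */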

struct u_vbuf_elements *
u_vbuf_create_vertex_elements(struct u_vbuf_mgr *mgrb,
                              unsigned count,
                              const struct pipe_vertex_element *attribs,
                              struct pipe_vertex_element *native_attribs)
{
   struct u_vbuf_priv *mgr = (struct u_vbuf_priv*)mgrb;
   unsigned i;
   struct u_vbuf_elements *ve = CALLOC_STRUCT(u_vbuf_elements);

   ve->count = count;

   if (!count) {
      return ve;
   }

   memcpy(ve->ve, attribs, sizeof(struct pipe_vertex_element) * count);
   memcpy(native_attribs, attribs, sizeof(struct pipe_vertex_element) * count);

   /* Set the best native format in case the original format is not
    * supported. */
   for (i = 0; i < count; i++) {
      enum pipe_format format = ve->ve[i].src_format;

      ve->src_format_size[i] = util_format_get_blocksize(format);

      /* Choose a native format.
       * For now we don't care about the alignment, that's going to
       * be sorted out later. */
      if (!mgr->b.caps.format_fixed32) {
         switch (format) {
            FORMAT_REPLACE(R32_FIXED,           R32_FLOAT);
            FORMAT_REPLACE(R32G32_FIXED,        R32G32_FLOAT);
            FORMAT_REPLACE(R32G32B32_FIXED,     R32G32B32_FLOAT);
            FORMAT_REPLACE(R32G32B32A32_FIXED,  R32G32B32A32_FLOAT);
            default:;
         }
      }
      if (!mgr->b.caps.format_float16) {
         switch (format) {
            FORMAT_REPLACE(R16_FLOAT,           R32_FLOAT);
            FORMAT_REPLACE(R16G16_FLOAT,        R32G32_FLOAT);
            FORMAT_REPLACE(R16G16B16_FLOAT,     R32G32B32_FLOAT);
            FORMAT_REPLACE(R16G16B16A16_FLOAT,  R32G32B32A32_FLOAT);
            default:;
         }
      }
      if (!mgr->b.caps.format_float64) {
         switch (format) {
            FORMAT_REPLACE(R64_FLOAT,           R32_FLOAT);
            FORMAT_REPLACE(R64G64_FLOAT,        R32G32_FLOAT);
            FORMAT_REPLACE(R64G64B64_FLOAT,     R32G32B32_FLOAT);
            FORMAT_REPLACE(R64G64B64A64_FLOAT,  R32G32B32A32_FLOAT);
            default:;
         }
      }
      if (!mgr->b.caps.format_norm32) {
         switch (format) {
            FORMAT_REPLACE(R32_UNORM,           R32_FLOAT);
            FORMAT_REPLACE(R32G32_UNORM,        R32G32_FLOAT);
            FORMAT_REPLACE(R32G32B32_UNORM,     R32G32B32_FLOAT);
            FORMAT_REPLACE(R32G32B32A32_UNORM,  R32G32B32A32_FLOAT);
            FORMAT_REPLACE(R32_SNORM,           R32_FLOAT);
            FORMAT_REPLACE(R32G32_SNORM,        R32G32_FLOAT);
            FORMAT_REPLACE(R32G32B32_SNORM,     R32G32B32_FLOAT);
            FORMAT_REPLACE(R32G32B32A32_SNORM,  R32G32B32A32_FLOAT);
            default:;
         }
      }
      if (!mgr->b.caps.format_scaled32) {
         switch (format) {
            FORMAT_REPLACE(R32_USCALED,         R32_FLOAT);
            FORMAT_REPLACE(R32G32_USCALED,      R32G32_FLOAT);
            FORMAT_REPLACE(R32G32B32_USCALED,   R32G32B32_FLOAT);
            FORMAT_REPLACE(R32G32B32A32_USCALED, R32G32B32A32_FLOAT);
            FORMAT_REPLACE(R32_SSCALED,         R32_FLOAT);
            FORMAT_REPLACE(R32G32_SSCALED,      R32G32_FLOAT);
            FORMAT_REPLACE(R32G32B32_SSCALED,   R32G32B32_FLOAT);
            FORMAT_REPLACE(R32G32B32A32_SSCALED, R32G32B32A32_FLOAT);
            default:;
         }
      }

      native_attribs[i].src_format = format;
      ve->native_format[i] = format;
      ve->native_format_size[i] =
         util_format_get_blocksize(ve->native_format[i]);

      ve->incompatible_layout =
         ve->incompatible_layout ||
         ve->ve[i].src_format != ve->native_format[i] ||
         (!mgr->b.caps.fetch_dword_unaligned && ve->ve[i].src_offset % 4 != 0);
   }

   /* Align the formats to the size of DWORD if needed. */
   if (!mgr->b.caps.fetch_dword_unaligned) {
      for (i = 0; i < count; i++) {
         ve->native_format_size[i] = align(ve->native_format_size[i], 4);
      }
   }

   return ve;
}

void u_vbuf_bind_vertex_elements(struct u_vbuf_mgr *mgrb,
                                 void *cso,
                                 struct u_vbuf_elements *ve)
{
   struct u_vbuf_priv *mgr = (struct u_vbuf_priv*)mgrb;

   if (!cso) {
      return;
   }

   if (!mgr->ve_binding_lock) {
      mgr->saved_ve = cso;
      mgr->ve = ve;
   }
}

void u_vbuf_destroy_vertex_elements(struct u_vbuf_mgr *mgr,
                                    struct u_vbuf_elements *ve)
{
   FREE(ve);
}

void u_vbuf_set_vertex_buffers(struct u_vbuf_mgr *mgrb,
                               unsigned count,
                               const struct pipe_vertex_buffer *bufs)
{
   struct u_vbuf_priv *mgr = (struct u_vbuf_priv*)mgrb;
   unsigned i;

   mgr->any_user_vbs = FALSE;
   mgr->incompatible_vb_layout = FALSE;

   if (!mgr->b.caps.fetch_dword_unaligned) {
      /* Check if the strides and offsets are aligned to the size of DWORD. */
      for (i = 0; i < count; i++) {
         if (bufs[i].buffer) {
            if (bufs[i].stride % 4 != 0 ||
                bufs[i].buffer_offset % 4 != 0) {
               mgr->incompatible_vb_layout = TRUE;
               break;
            }
         }
      }
   }

   for (i = 0; i < count; i++) {
      const struct pipe_vertex_buffer *vb = &bufs[i];

      pipe_resource_reference(&mgr->b.vertex_buffer[i].buffer, vb->buffer);
      pipe_resource_reference(&mgr->b.real_vertex_buffer[i].buffer, NULL);

      mgr->b.real_vertex_buffer[i].buffer_offset =
         mgr->b.vertex_buffer[i].buffer_offset = vb->buffer_offset;

      mgr->b.real_vertex_buffer[i].stride =
         mgr->b.vertex_buffer[i].stride = vb->stride;

      if (!vb->buffer) {
         continue;
      }

      if (u_vbuf_resource(vb->buffer)->user_ptr) {
         mgr->any_user_vbs = TRUE;
         continue;
      }

      pipe_resource_reference(&mgr->b.real_vertex_buffer[i].buffer, vb->buffer);
   }

   for (i = count; i < mgr->b.nr_vertex_buffers; i++) {
      pipe_resource_reference(&mgr->b.vertex_buffer[i].buffer, NULL);
   }
   for (i = count; i < mgr->b.nr_real_vertex_buffers; i++) {
      pipe_resource_reference(&mgr->b.real_vertex_buffer[i].buffer, NULL);
   }

   mgr->b.nr_vertex_buffers = count;
   mgr->b.nr_real_vertex_buffers = count;
}

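/* Upload user (CPU-allocated) vertex buffers into GPU-visible memory.
 * For each element that sources a user buffer, compute the byte range the
 * draw will read, merge the ranges per buffer, and upload each merged
 * range with u_upload_data. */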
static void
u_vbuf_upload_buffers(struct u_vbuf_priv *mgr,
                      int min_index, int max_index,
                      unsigned instance_count)
{
   unsigned i;
   unsigned count = max_index + 1 - min_index;
   unsigned nr_velems = mgr->ve->count;
   unsigned nr_vbufs = mgr->b.nr_vertex_buffers;
   /* start_offset[i] is only valid when end_offset[i] != 0. */
   unsigned start_offset[PIPE_MAX_ATTRIBS];
   unsigned end_offset[PIPE_MAX_ATTRIBS] = {0};

   /* Determine how much data needs to be uploaded. */
   for (i = 0; i < nr_velems; i++) {
      struct pipe_vertex_element *velem = &mgr->ve->ve[i];
      unsigned index = velem->vertex_buffer_index;
      struct pipe_vertex_buffer *vb = &mgr->b.vertex_buffer[index];
      unsigned instance_div, first, size;

      assert(vb->buffer);

      if (!u_vbuf_resource(vb->buffer)->user_ptr) {
         continue;
      }

      instance_div = velem->instance_divisor;
      first = vb->buffer_offset + velem->src_offset;

      if (!vb->stride) {
         /* Constant attrib. */
         size = mgr->ve->src_format_size[i];
      } else if (instance_div) {
         /* Per-instance attrib. */
         unsigned num_instances = (instance_count + instance_div - 1) /
                                  instance_div;
         size = vb->stride * (num_instances - 1) + mgr->ve->src_format_size[i];
      } else {
         /* Per-vertex attrib. */
         first += vb->stride * min_index;
         size = vb->stride * (count - 1) + mgr->ve->src_format_size[i];
      }

      /* Update offsets. */
      if (!end_offset[index]) {
         start_offset[index] = first;
         end_offset[index] = first + size;
      } else {
         if (first < start_offset[index])
            start_offset[index] = first;
         if (first + size > end_offset[index])
            end_offset[index] = first + size;
      }
   }

   /* Upload buffers. */
   for (i = 0; i < nr_vbufs; i++) {
      unsigned start, end = end_offset[i];
      boolean flushed;
      struct pipe_vertex_buffer *real_vb;
      uint8_t *ptr;

      if (!end) {
         continue;
      }

      start = start_offset[i];
      assert(start < end);

      real_vb = &mgr->b.real_vertex_buffer[i];
      ptr = u_vbuf_resource(mgr->b.vertex_buffer[i].buffer)->user_ptr;

      u_upload_data(mgr->b.uploader, start, end - start, ptr + start,
                    &real_vb->buffer_offset, &real_vb->buffer, &flushed);

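      /* The copy starts at byte "start" of the user buffer, so shift
       * buffer_offset back so that the original buffer_offset and
       * src_offset still address the right data in the uploaded range. */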
      real_vb->buffer_offset -= start;
   }
}

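/* Compute the maximum vertex index fetchable from the bound hardware
 * buffers: for each element, the number of whole vertices that fit in the
 * buffer minus one, taking the minimum over all elements. For example,
 * with width0 = 20, buffer_offset = 0, stride = 8 and src_size = 4,
 * unused is 8 - 4 = 4 and max_index = (20 + 4) / 8 - 1 = 2 (vertices at
 * byte offsets 0, 8 and 16). */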
static void u_vbuf_compute_max_index(struct u_vbuf_priv *mgr)
{
   unsigned i, nr = mgr->ve->count;

   mgr->b.max_index = ~0;

   for (i = 0; i < nr; i++) {
      struct pipe_vertex_buffer *vb =
         &mgr->b.vertex_buffer[mgr->ve->ve[i].vertex_buffer_index];
      unsigned max_index, src_size, unused;

      if (!vb->buffer ||
          !vb->stride ||
          u_vbuf_resource(vb->buffer)->user_ptr ||
          mgr->ve->ve[i].instance_divisor) {
         continue;
      }

      src_size = mgr->ve->ve[i].src_offset + mgr->ve->src_format_size[i];

      /* If the vertex data spills over the stride (src_offset + format
       * size >= stride), there are no unused bytes after the last
       * vertex. */
      if (src_size >= vb->stride) {
         unused = 0;
      } else {
         /* How many bytes are unused after the last vertex.
          * width0 may be "count*stride - unused" and we have to compensate
          * for that when dividing by stride. */
         unused = vb->stride - src_size;
      }

      /* Compute the maximum index for this vertex element. */
      max_index =
         (vb->buffer->width0 - vb->buffer_offset + unused) /
         vb->stride - 1;

      mgr->b.max_index = MIN2(mgr->b.max_index, max_index);
   }
}

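/* Called before a draw. Computes the real index range, translates
 * incompatible layouts and uploads user buffers as needed. Returns
 * U_VBUF_BUFFERS_UPDATED when real_vertex_buffer has changed and the
 * driver must re-set its hardware vertex buffer state before drawing. */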
enum u_vbuf_return_flags
u_vbuf_draw_begin(struct u_vbuf_mgr *mgrb,
                  const struct pipe_draw_info *info)
{
   struct u_vbuf_priv *mgr = (struct u_vbuf_priv*)mgrb;
   int min_index, max_index;

   u_vbuf_compute_max_index(mgr);

   min_index = info->min_index - info->index_bias;
   if (info->max_index == ~0) {
      max_index = mgr->b.max_index;
   } else {
      max_index = MIN2(info->max_index - info->index_bias, mgr->b.max_index);
   }

   /* Translate vertices with non-native layouts or formats. */
   if (mgr->incompatible_vb_layout || mgr->ve->incompatible_layout) {
      u_vbuf_translate_begin(mgr, min_index, max_index);
   }

   /* Upload user buffers. */
   if (mgr->any_user_vbs) {
      u_vbuf_upload_buffers(mgr, min_index, max_index, info->instance_count);
   }

   return mgr->any_user_vbs || mgr->fallback_ve ? U_VBUF_BUFFERS_UPDATED : 0;
}

void u_vbuf_draw_end(struct u_vbuf_mgr *mgrb)
{
   struct u_vbuf_priv *mgr = (struct u_vbuf_priv*)mgrb;

   if (mgr->fallback_ve) {
      u_vbuf_translate_end(mgr);
   }
}