r600g: expose ARB_ES2_compatibility by claiming fixed-point format support
src/gallium/auxiliary/util/u_vbuf_mgr.c

/**************************************************************************
 *
 * Copyright 2011 Marek Olšák <maraeo@gmail.com>
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "util/u_vbuf_mgr.h"

#include "util/u_format.h"
#include "util/u_inlines.h"
#include "util/u_memory.h"
#include "util/u_upload_mgr.h"
#include "translate/translate.h"
#include "translate/translate_cache.h"

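/* Vertex element state created by u_vbuf_mgr_create_vertex_elements.
 * Besides a copy of the pipe_vertex_element array, it caches, per attribute,
 * which format the hardware can actually fetch ("native") and whether any
 * attribute needs the translate fallback. */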
struct u_vbuf_mgr_elements {
   unsigned count;
   struct pipe_vertex_element ve[PIPE_MAX_ATTRIBS];

   unsigned src_format_size[PIPE_MAX_ATTRIBS];

   /* If (velem[i].src_format != native_format[i]), the vertex buffer
    * referenced by the vertex element cannot be used for rendering and
    * its vertex data must be translated to native_format[i]. */
   enum pipe_format native_format[PIPE_MAX_ATTRIBS];
   unsigned native_format_size[PIPE_MAX_ATTRIBS];

   /* This might mean two things:
    * - src_format != native_format, as discussed above.
    * - src_offset % 4 != 0 (if the caps don't allow such an offset). */
   boolean incompatible_layout;
};

struct u_vbuf_mgr_priv {
   struct u_vbuf_mgr b;
   struct pipe_context *pipe;

   struct translate_cache *translate_cache;
   unsigned translate_vb_slot;

   struct u_vbuf_mgr_elements *ve;
   void *saved_ve, *fallback_ve;
   boolean ve_binding_lock;

   unsigned saved_buffer_offset[PIPE_MAX_ATTRIBS];

   boolean any_user_vbs;
   boolean incompatible_vb_layout;
};

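/* Ask the screen which vertex formats the driver can fetch directly.
 * Anything reported as unsupported here is later converted by the
 * translate fallback in u_vbuf_translate_begin. */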
static void u_vbuf_mgr_init_format_caps(struct u_vbuf_mgr_priv *mgr)
{
   struct pipe_screen *screen = mgr->pipe->screen;

   mgr->b.caps.format_fixed32 =
      screen->is_format_supported(screen, PIPE_FORMAT_R32_FIXED, PIPE_BUFFER,
                                  0, PIPE_BIND_VERTEX_BUFFER);

   mgr->b.caps.format_float16 =
      screen->is_format_supported(screen, PIPE_FORMAT_R16_FLOAT, PIPE_BUFFER,
                                  0, PIPE_BIND_VERTEX_BUFFER);

   mgr->b.caps.format_float64 =
      screen->is_format_supported(screen, PIPE_FORMAT_R64_FLOAT, PIPE_BUFFER,
                                  0, PIPE_BIND_VERTEX_BUFFER);

   mgr->b.caps.format_norm32 =
      screen->is_format_supported(screen, PIPE_FORMAT_R32_UNORM, PIPE_BUFFER,
                                  0, PIPE_BIND_VERTEX_BUFFER) &&
      screen->is_format_supported(screen, PIPE_FORMAT_R32_SNORM, PIPE_BUFFER,
                                  0, PIPE_BIND_VERTEX_BUFFER);

   mgr->b.caps.format_scaled32 =
      screen->is_format_supported(screen, PIPE_FORMAT_R32_USCALED, PIPE_BUFFER,
                                  0, PIPE_BIND_VERTEX_BUFFER) &&
      screen->is_format_supported(screen, PIPE_FORMAT_R32_SSCALED, PIPE_BUFFER,
                                  0, PIPE_BIND_VERTEX_BUFFER);
}

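/* Create the manager. Meant to be called once at context creation; a driver
 * might do, e.g. (the context field and sizes below are hypothetical, not a
 * recommendation):
 *
 *    ctx->vbuf_mgr = u_vbuf_mgr_create(pipe, 128 * 1024, 16,
 *                                      PIPE_BIND_VERTEX_BUFFER,
 *                                      U_VERTEX_FETCH_BYTE_ALIGNED);
 */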
struct u_vbuf_mgr *
u_vbuf_mgr_create(struct pipe_context *pipe,
                  unsigned upload_buffer_size,
                  unsigned upload_buffer_alignment,
                  unsigned upload_buffer_bind,
                  enum u_fetch_alignment fetch_alignment)
{
   struct u_vbuf_mgr_priv *mgr = CALLOC_STRUCT(u_vbuf_mgr_priv);

   mgr->pipe = pipe;
   mgr->translate_cache = translate_cache_create();

   mgr->b.uploader = u_upload_create(pipe, upload_buffer_size,
                                     upload_buffer_alignment,
                                     upload_buffer_bind);

   mgr->b.caps.fetch_dword_unaligned =
      fetch_alignment == U_VERTEX_FETCH_BYTE_ALIGNED;

   u_vbuf_mgr_init_format_caps(mgr);

   return &mgr->b;
}

void u_vbuf_mgr_destroy(struct u_vbuf_mgr *mgrb)
{
   struct u_vbuf_mgr_priv *mgr = (struct u_vbuf_mgr_priv*)mgrb;
   unsigned i;

   for (i = 0; i < mgr->b.nr_real_vertex_buffers; i++) {
      pipe_resource_reference(&mgr->b.vertex_buffer[i].buffer, NULL);
      pipe_resource_reference(&mgr->b.real_vertex_buffer[i], NULL);
   }

   translate_cache_destroy(mgr->translate_cache);
   u_upload_destroy(mgr->b.uploader);
   FREE(mgr);
}

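/* Translate the vertex data that the hardware cannot fetch natively (wrong
 * format or misaligned) into one newly allocated vertex buffer, and bind a
 * fallback vertex elements state which reads from it instead. */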
static enum u_vbuf_return_flags
u_vbuf_translate_begin(struct u_vbuf_mgr_priv *mgr,
                       int min_index, int max_index)
{
   struct translate_key key;
   struct translate_element *te;
   unsigned tr_elem_index[PIPE_MAX_ATTRIBS];
   struct translate *tr;
   boolean vb_translated[PIPE_MAX_ATTRIBS] = {0};
   uint8_t *vb_map[PIPE_MAX_ATTRIBS] = {0}, *out_map;
   struct pipe_transfer *vb_transfer[PIPE_MAX_ATTRIBS] = {0};
   struct pipe_resource *out_buffer = NULL;
   unsigned i, num_verts, out_offset;
   struct pipe_vertex_element new_velems[PIPE_MAX_ATTRIBS];
   boolean upload_flushed = FALSE;

   memset(&key, 0, sizeof(key));
   memset(tr_elem_index, 0xff, sizeof(tr_elem_index));

   /* Initialize the translate key, i.e. the recipe for how vertices should
    * be translated, from the current vertex elements. */
   for (i = 0; i < mgr->ve->count; i++) {
      struct pipe_vertex_buffer *vb =
         &mgr->b.vertex_buffer[mgr->ve->ve[i].vertex_buffer_index];
      enum pipe_format output_format = mgr->ve->native_format[i];
      unsigned output_format_size = mgr->ve->native_format_size[i];

      /* Check for support. */
      if (mgr->ve->ve[i].src_format == mgr->ve->native_format[i] &&
          (mgr->b.caps.fetch_dword_unaligned ||
           (vb->buffer_offset % 4 == 0 &&
            vb->stride % 4 == 0 &&
            mgr->ve->ve[i].src_offset % 4 == 0))) {
         continue;
      }

      /* Workaround for translate: output floats instead of halves. */
      switch (output_format) {
      case PIPE_FORMAT_R16_FLOAT:
         output_format = PIPE_FORMAT_R32_FLOAT;
         output_format_size = 4;
         break;
      case PIPE_FORMAT_R16G16_FLOAT:
         output_format = PIPE_FORMAT_R32G32_FLOAT;
         output_format_size = 8;
         break;
      case PIPE_FORMAT_R16G16B16_FLOAT:
         output_format = PIPE_FORMAT_R32G32B32_FLOAT;
         output_format_size = 12;
         break;
      case PIPE_FORMAT_R16G16B16A16_FLOAT:
         output_format = PIPE_FORMAT_R32G32B32A32_FLOAT;
         output_format_size = 16;
         break;
      default:;
      }

      /* Add this vertex element. */
      te = &key.element[key.nr_elements];
      /*te->type;
        te->instance_divisor;*/
      te->input_buffer = mgr->ve->ve[i].vertex_buffer_index;
      te->input_format = mgr->ve->ve[i].src_format;
      te->input_offset = mgr->ve->ve[i].src_offset;
      te->output_format = output_format;
      te->output_offset = key.output_stride;

      key.output_stride += output_format_size;
      vb_translated[mgr->ve->ve[i].vertex_buffer_index] = TRUE;
      tr_elem_index[i] = key.nr_elements;
      key.nr_elements++;
   }

   /* Get a translate object. */
   tr = translate_cache_find(mgr->translate_cache, &key);

   /* Map buffers we want to translate. */
   for (i = 0; i < mgr->b.nr_vertex_buffers; i++) {
      if (vb_translated[i]) {
         struct pipe_vertex_buffer *vb = &mgr->b.vertex_buffer[i];

         vb_map[i] = pipe_buffer_map(mgr->pipe, vb->buffer,
                                     PIPE_TRANSFER_READ, &vb_transfer[i]);

         tr->set_buffer(tr, i,
                        vb_map[i] + vb->buffer_offset + vb->stride * min_index,
                        vb->stride, ~0);
      }
   }

   /* Create and map the output buffer. */
   num_verts = max_index + 1 - min_index;

   u_upload_alloc(mgr->b.uploader,
                  key.output_stride * min_index,
                  key.output_stride * num_verts,
                  &out_offset, &out_buffer, &upload_flushed,
                  (void**)&out_map);

   out_offset -= key.output_stride * min_index;

   /* Translate. */
   tr->run(tr, 0, num_verts, 0, out_map);

   /* Unmap all buffers. */
   for (i = 0; i < mgr->b.nr_vertex_buffers; i++) {
      if (vb_translated[i]) {
         pipe_buffer_unmap(mgr->pipe, vb_transfer[i]);
      }
   }

   /* Setup the new vertex buffer in the first free slot. */
   mgr->translate_vb_slot = ~0;
   for (i = 0; i < PIPE_MAX_ATTRIBS; i++) {
      if (!mgr->b.vertex_buffer[i].buffer) {
         mgr->translate_vb_slot = i;

         if (i >= mgr->b.nr_vertex_buffers) {
            mgr->b.nr_real_vertex_buffers = i+1;
         }
         break;
      }
   }

   if (mgr->translate_vb_slot != ~0) {
      /* Setup the new vertex buffer. */
      pipe_resource_reference(
            &mgr->b.real_vertex_buffer[mgr->translate_vb_slot], out_buffer);
      mgr->b.vertex_buffer[mgr->translate_vb_slot].buffer_offset = out_offset;
      mgr->b.vertex_buffer[mgr->translate_vb_slot].stride = key.output_stride;

      /* Setup new vertex elements. */
      for (i = 0; i < mgr->ve->count; i++) {
         if (tr_elem_index[i] < key.nr_elements) {
            te = &key.element[tr_elem_index[i]];
            new_velems[i].instance_divisor = mgr->ve->ve[i].instance_divisor;
            new_velems[i].src_format = te->output_format;
            new_velems[i].src_offset = te->output_offset;
            new_velems[i].vertex_buffer_index = mgr->translate_vb_slot;
         } else {
            memcpy(&new_velems[i], &mgr->ve->ve[i],
                   sizeof(struct pipe_vertex_element));
         }
      }

      mgr->fallback_ve =
         mgr->pipe->create_vertex_elements_state(mgr->pipe, mgr->ve->count,
                                                 new_velems);

      /* Preserve saved_ve. */
      mgr->ve_binding_lock = TRUE;
      mgr->pipe->bind_vertex_elements_state(mgr->pipe, mgr->fallback_ve);
      mgr->ve_binding_lock = FALSE;
   }

   pipe_resource_reference(&out_buffer, NULL);

   return upload_flushed ? U_VBUF_UPLOAD_FLUSHED : 0;
}

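/* Undo u_vbuf_translate_begin: restore the vertex elements state saved by
 * the driver and release the temporary vertex buffer. */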
static void u_vbuf_translate_end(struct u_vbuf_mgr_priv *mgr)
{
   if (mgr->fallback_ve == NULL) {
      return;
   }

   /* Restore vertex elements. */
   /* Note that saved_ve will be overwritten in bind_vertex_elements_state. */
   mgr->pipe->bind_vertex_elements_state(mgr->pipe, mgr->saved_ve);
   mgr->pipe->delete_vertex_elements_state(mgr->pipe, mgr->fallback_ve);
   mgr->fallback_ve = NULL;

   /* Delete the now-unused VBO. */
   pipe_resource_reference(&mgr->b.real_vertex_buffer[mgr->translate_vb_slot],
                           NULL);
   mgr->b.nr_real_vertex_buffers = mgr->b.nr_vertex_buffers;
}

#define FORMAT_REPLACE(what, withwhat) \
    case PIPE_FORMAT_##what: format = PIPE_FORMAT_##withwhat; break

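/* Wrap the driver's vertex elements state. For each attribute, choose a
 * format the hardware supports (per the caps queried above) and return it
 * in native_attribs, so the driver creates its CSO from fetchable formats. */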
struct u_vbuf_mgr_elements *
u_vbuf_mgr_create_vertex_elements(struct u_vbuf_mgr *mgrb,
                                  unsigned count,
                                  const struct pipe_vertex_element *attribs,
                                  struct pipe_vertex_element *native_attribs)
{
   struct u_vbuf_mgr_priv *mgr = (struct u_vbuf_mgr_priv*)mgrb;
   unsigned i;
   struct u_vbuf_mgr_elements *ve = CALLOC_STRUCT(u_vbuf_mgr_elements);

   ve->count = count;

   if (!count) {
      return ve;
   }

   memcpy(ve->ve, attribs, sizeof(struct pipe_vertex_element) * count);
   memcpy(native_attribs, attribs, sizeof(struct pipe_vertex_element) * count);

   /* Set the best native format in case the original format is not
    * supported. */
   for (i = 0; i < count; i++) {
      enum pipe_format format = ve->ve[i].src_format;

      ve->src_format_size[i] = util_format_get_blocksize(format);

      /* Choose a native format.
       * For now we don't care about the alignment, that's going to
       * be sorted out later. */
      if (!mgr->b.caps.format_fixed32) {
         switch (format) {
            FORMAT_REPLACE(R32_FIXED,           R32_FLOAT);
            FORMAT_REPLACE(R32G32_FIXED,        R32G32_FLOAT);
            FORMAT_REPLACE(R32G32B32_FIXED,     R32G32B32_FLOAT);
            FORMAT_REPLACE(R32G32B32A32_FIXED,  R32G32B32A32_FLOAT);
            default:;
         }
      }
      if (!mgr->b.caps.format_float16) {
         switch (format) {
            FORMAT_REPLACE(R16_FLOAT,           R32_FLOAT);
            FORMAT_REPLACE(R16G16_FLOAT,        R32G32_FLOAT);
            FORMAT_REPLACE(R16G16B16_FLOAT,     R32G32B32_FLOAT);
            FORMAT_REPLACE(R16G16B16A16_FLOAT,  R32G32B32A32_FLOAT);
            default:;
         }
      }
      if (!mgr->b.caps.format_float64) {
         switch (format) {
            FORMAT_REPLACE(R64_FLOAT,           R32_FLOAT);
            FORMAT_REPLACE(R64G64_FLOAT,        R32G32_FLOAT);
            FORMAT_REPLACE(R64G64B64_FLOAT,     R32G32B32_FLOAT);
            FORMAT_REPLACE(R64G64B64A64_FLOAT,  R32G32B32A32_FLOAT);
            default:;
         }
      }
      if (!mgr->b.caps.format_norm32) {
         switch (format) {
            FORMAT_REPLACE(R32_UNORM,           R32_FLOAT);
            FORMAT_REPLACE(R32G32_UNORM,        R32G32_FLOAT);
            FORMAT_REPLACE(R32G32B32_UNORM,     R32G32B32_FLOAT);
            FORMAT_REPLACE(R32G32B32A32_UNORM,  R32G32B32A32_FLOAT);
            FORMAT_REPLACE(R32_SNORM,           R32_FLOAT);
            FORMAT_REPLACE(R32G32_SNORM,        R32G32_FLOAT);
            FORMAT_REPLACE(R32G32B32_SNORM,     R32G32B32_FLOAT);
            FORMAT_REPLACE(R32G32B32A32_SNORM,  R32G32B32A32_FLOAT);
            default:;
         }
      }
      if (!mgr->b.caps.format_scaled32) {
         switch (format) {
            FORMAT_REPLACE(R32_USCALED,          R32_FLOAT);
            FORMAT_REPLACE(R32G32_USCALED,       R32G32_FLOAT);
            FORMAT_REPLACE(R32G32B32_USCALED,    R32G32B32_FLOAT);
            FORMAT_REPLACE(R32G32B32A32_USCALED, R32G32B32A32_FLOAT);
            FORMAT_REPLACE(R32_SSCALED,          R32_FLOAT);
            FORMAT_REPLACE(R32G32_SSCALED,       R32G32_FLOAT);
            FORMAT_REPLACE(R32G32B32_SSCALED,    R32G32B32_FLOAT);
            FORMAT_REPLACE(R32G32B32A32_SSCALED, R32G32B32A32_FLOAT);
            default:;
         }
      }

      native_attribs[i].src_format = format;
      ve->native_format[i] = format;
      ve->native_format_size[i] =
          util_format_get_blocksize(ve->native_format[i]);

      ve->incompatible_layout =
            ve->incompatible_layout ||
            ve->ve[i].src_format != ve->native_format[i] ||
            (!mgr->b.caps.fetch_dword_unaligned && ve->ve[i].src_offset % 4 != 0);
   }

   /* Align the formats to the size of DWORD if needed. */
   if (!mgr->b.caps.fetch_dword_unaligned) {
      for (i = 0; i < count; i++) {
         ve->native_format_size[i] = align(ve->native_format_size[i], 4);
      }
   }

   return ve;
}

void u_vbuf_mgr_bind_vertex_elements(struct u_vbuf_mgr *mgrb,
                                     void *cso,
                                     struct u_vbuf_mgr_elements *ve)
{
   struct u_vbuf_mgr_priv *mgr = (struct u_vbuf_mgr_priv*)mgrb;

   if (!cso) {
      return;
   }

   if (!mgr->ve_binding_lock) {
      mgr->saved_ve = cso;
      mgr->ve = ve;
   }
}

void u_vbuf_mgr_destroy_vertex_elements(struct u_vbuf_mgr *mgr,
                                        struct u_vbuf_mgr_elements *ve)
{
   FREE(ve);
}

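/* Bind vertex buffers. User buffers are only noted here; their contents
 * are uploaded later, from u_vbuf_mgr_draw_begin. */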
void u_vbuf_mgr_set_vertex_buffers(struct u_vbuf_mgr *mgrb,
                                   unsigned count,
                                   const struct pipe_vertex_buffer *bufs)
{
   struct u_vbuf_mgr_priv *mgr = (struct u_vbuf_mgr_priv*)mgrb;
   unsigned i;

   mgr->any_user_vbs = FALSE;
   mgr->incompatible_vb_layout = FALSE;

   if (!mgr->b.caps.fetch_dword_unaligned) {
      /* Check if the strides and offsets are aligned to the size of DWORD. */
      for (i = 0; i < count; i++) {
         if (bufs[i].buffer) {
            if (bufs[i].stride % 4 != 0 ||
                bufs[i].buffer_offset % 4 != 0) {
               mgr->incompatible_vb_layout = TRUE;
               break;
            }
         }
      }
   }

   for (i = 0; i < count; i++) {
      const struct pipe_vertex_buffer *vb = &bufs[i];

      pipe_resource_reference(&mgr->b.vertex_buffer[i].buffer, vb->buffer);
      pipe_resource_reference(&mgr->b.real_vertex_buffer[i], NULL);
      mgr->saved_buffer_offset[i] = vb->buffer_offset;

      if (!vb->buffer) {
         continue;
      }

      if (u_vbuf_resource(vb->buffer)->user_ptr) {
         mgr->any_user_vbs = TRUE;
         continue;
      }

      pipe_resource_reference(&mgr->b.real_vertex_buffer[i], vb->buffer);
   }

   for (; i < mgr->b.nr_real_vertex_buffers; i++) {
      pipe_resource_reference(&mgr->b.vertex_buffer[i].buffer, NULL);
      pipe_resource_reference(&mgr->b.real_vertex_buffer[i], NULL);
   }

   memcpy(mgr->b.vertex_buffer, bufs,
          sizeof(struct pipe_vertex_buffer) * count);

   mgr->b.nr_vertex_buffers = count;
   mgr->b.nr_real_vertex_buffers = count;
}

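/* Upload the accessed ranges of user vertex buffers into real buffers.
 * For an instanced attribute, the size covers every fetched instance, e.g.
 * with instance_count = 10 and instance_divisor = 4, ceil(10/4) = 3
 * elements are uploaded. */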
static enum u_vbuf_return_flags
u_vbuf_upload_buffers(struct u_vbuf_mgr_priv *mgr,
                      int min_index, int max_index,
                      unsigned instance_count)
{
   unsigned i, nr = mgr->ve->count;
   unsigned count = max_index + 1 - min_index;
   boolean uploaded[PIPE_MAX_ATTRIBS] = {0};
   enum u_vbuf_return_flags retval = 0;

   for (i = 0; i < nr; i++) {
      unsigned index = mgr->ve->ve[i].vertex_buffer_index;
      struct pipe_vertex_buffer *vb = &mgr->b.vertex_buffer[index];

      if (vb->buffer &&
          u_vbuf_resource(vb->buffer)->user_ptr &&
          !uploaded[index]) {
         unsigned first, size;
         boolean flushed;
         unsigned instance_div = mgr->ve->ve[i].instance_divisor;

         if (instance_div) {
            first = 0;
            size = vb->stride *
                   ((instance_count + instance_div - 1) / instance_div);
         } else if (vb->stride) {
            first = vb->stride * min_index;
            size = vb->stride * count;

            /* Unusual case when stride is smaller than the format size.
             * XXX This won't work with interleaved arrays. */
            if (mgr->ve->native_format_size[i] > vb->stride)
               size += mgr->ve->native_format_size[i] - vb->stride;
         } else {
            first = 0;
            size = mgr->ve->native_format_size[i];
         }

         u_upload_data(mgr->b.uploader, first, size,
                       u_vbuf_resource(vb->buffer)->user_ptr + first,
                       &vb->buffer_offset,
                       &mgr->b.real_vertex_buffer[index],
                       &flushed);

         vb->buffer_offset -= first;

         uploaded[index] = TRUE;
         if (flushed)
            retval |= U_VBUF_UPLOAD_FLUSHED;
      } else {
         assert(mgr->b.real_vertex_buffer[index]);
      }
   }

   return retval;
}

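/* Compute the largest vertex index that can be fetched safely from every
 * bound buffer. For example, with width0 = 100, buffer_offset = 0,
 * stride = 12, src_offset = 0 and a 12-byte format, unused = 0 and
 * max_index = 100 / 12 - 1 = 7 (vertex 7 occupies bytes 84..95). */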
static void u_vbuf_mgr_compute_max_index(struct u_vbuf_mgr_priv *mgr)
{
   unsigned i, nr = mgr->ve->count;

   mgr->b.max_index = ~0;

   for (i = 0; i < nr; i++) {
      struct pipe_vertex_buffer *vb =
         &mgr->b.vertex_buffer[mgr->ve->ve[i].vertex_buffer_index];
      int unused;
      unsigned max_index;

      if (!vb->buffer ||
          !vb->stride ||
          u_vbuf_resource(vb->buffer)->user_ptr) {
         continue;
      }

      /* How many bytes are unused after the last vertex.
       * width0 may be "count*stride - unused" and we have to compensate
       * for that when dividing by stride. */
      unused = vb->stride -
               (mgr->ve->ve[i].src_offset + mgr->ve->src_format_size[i]);

      /* If src_offset is greater than stride (which means it's a buffer
       * offset rather than a vertex offset)... */
      if (unused < 0) {
         unused = 0;
      }

      /* Compute the maximum index for this vertex element. */
      max_index =
         (vb->buffer->width0 - vb->buffer_offset + (unsigned)unused) /
         vb->stride - 1;

      mgr->b.max_index = MIN2(mgr->b.max_index, max_index);
   }
}

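/* Called by the driver before a draw; translates and/or uploads vertex
 * buffers as needed. A driver's draw_vbo hook might use it roughly like
 * this (the drv_* function names are hypothetical):
 *
 *    if (u_vbuf_mgr_draw_begin(ctx->vbuf_mgr, info) & U_VBUF_BUFFERS_UPDATED)
 *       drv_emit_vertex_buffers(ctx);   /+ re-validate buffer state +/
 *    drv_draw(ctx, info);
 *    u_vbuf_mgr_draw_end(ctx->vbuf_mgr);
 *
 * (Comment delimiters in the sketch are escaped as /+ +/.)
 */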
enum u_vbuf_return_flags
u_vbuf_mgr_draw_begin(struct u_vbuf_mgr *mgrb,
                      const struct pipe_draw_info *info)
{
   struct u_vbuf_mgr_priv *mgr = (struct u_vbuf_mgr_priv*)mgrb;
   int min_index, max_index;
   enum u_vbuf_return_flags retval = 0;

   u_vbuf_mgr_compute_max_index(mgr);

   min_index = info->min_index - info->index_bias;
   if (info->max_index == ~0) {
      max_index = mgr->b.max_index;
   } else {
      max_index = MIN2(info->max_index - info->index_bias, mgr->b.max_index);
   }

   /* Translate vertices with non-native layouts or formats. */
   if (mgr->incompatible_vb_layout || mgr->ve->incompatible_layout) {
      retval |= u_vbuf_translate_begin(mgr, min_index, max_index);

      if (mgr->fallback_ve) {
         retval |= U_VBUF_BUFFERS_UPDATED;
      }
   }

   /* Upload user buffers. */
   if (mgr->any_user_vbs) {
      retval |= u_vbuf_upload_buffers(mgr, min_index, max_index,
                                      info->instance_count);
      retval |= U_VBUF_BUFFERS_UPDATED;
   }
   return retval;
}

void u_vbuf_mgr_draw_end(struct u_vbuf_mgr *mgrb)
{
   struct u_vbuf_mgr_priv *mgr = (struct u_vbuf_mgr_priv*)mgrb;
   unsigned i;

   /* buffer offsets were modified in u_vbuf_upload_buffers */
   if (mgr->any_user_vbs) {
      for (i = 0; i < mgr->b.nr_vertex_buffers; i++)
         mgr->b.vertex_buffer[i].buffer_offset = mgr->saved_buffer_offset[i];
   }

   if (mgr->fallback_ve) {
      u_vbuf_translate_end(mgr);
   }
}