st/mesa: reorder code in draw_vbo
src/mesa/state_tracker/st_draw.c
/**************************************************************************
 *
 * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

/*
 * This file implements the st_draw_vbo() function which is called from
 * Mesa's VBO module. All point/line/triangle rendering is done through
 * this function whether the user called glBegin/End, glDrawArrays,
 * glDrawElements, glEvalMesh, or glCallList, etc.
 *
 * We basically convert the VBO's vertex attribute/array information into
 * Gallium vertex state, bind the vertex buffer objects and call
 * pipe->draw_vbo().
 *
 * Authors:
 *   Keith Whitwell <keith@tungstengraphics.com>
 */


#include "main/imports.h"
#include "main/image.h"
#include "main/bufferobj.h"
#include "main/macros.h"
#include "main/mfeatures.h"

#include "vbo/vbo.h"

#include "st_context.h"
#include "st_atom.h"
#include "st_cb_bufferobjects.h"
#include "st_cb_xformfb.h"
#include "st_draw.h"
#include "st_program.h"

#include "pipe/p_context.h"
#include "pipe/p_defines.h"
#include "util/u_inlines.h"
#include "util/u_format.h"
#include "util/u_prim.h"
#include "util/u_draw_quad.h"
#include "util/u_upload_mgr.h"
#include "draw/draw_context.h"
#include "cso_cache/cso_context.h"

#include "../glsl/ir_uniform.h"

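/* Tables mapping a GL datatype to the corresponding pipe vertex format,
 * indexed by the number of components minus one.
 */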
static GLuint double_types[4] = {
   PIPE_FORMAT_R64_FLOAT,
   PIPE_FORMAT_R64G64_FLOAT,
   PIPE_FORMAT_R64G64B64_FLOAT,
   PIPE_FORMAT_R64G64B64A64_FLOAT
};

static GLuint float_types[4] = {
   PIPE_FORMAT_R32_FLOAT,
   PIPE_FORMAT_R32G32_FLOAT,
   PIPE_FORMAT_R32G32B32_FLOAT,
   PIPE_FORMAT_R32G32B32A32_FLOAT
};

static GLuint half_float_types[4] = {
   PIPE_FORMAT_R16_FLOAT,
   PIPE_FORMAT_R16G16_FLOAT,
   PIPE_FORMAT_R16G16B16_FLOAT,
   PIPE_FORMAT_R16G16B16A16_FLOAT
};

static GLuint uint_types_norm[4] = {
   PIPE_FORMAT_R32_UNORM,
   PIPE_FORMAT_R32G32_UNORM,
   PIPE_FORMAT_R32G32B32_UNORM,
   PIPE_FORMAT_R32G32B32A32_UNORM
};

static GLuint uint_types_scale[4] = {
   PIPE_FORMAT_R32_USCALED,
   PIPE_FORMAT_R32G32_USCALED,
   PIPE_FORMAT_R32G32B32_USCALED,
   PIPE_FORMAT_R32G32B32A32_USCALED
};

static GLuint uint_types_int[4] = {
   PIPE_FORMAT_R32_UINT,
   PIPE_FORMAT_R32G32_UINT,
   PIPE_FORMAT_R32G32B32_UINT,
   PIPE_FORMAT_R32G32B32A32_UINT
};

static GLuint int_types_norm[4] = {
   PIPE_FORMAT_R32_SNORM,
   PIPE_FORMAT_R32G32_SNORM,
   PIPE_FORMAT_R32G32B32_SNORM,
   PIPE_FORMAT_R32G32B32A32_SNORM
};

static GLuint int_types_scale[4] = {
   PIPE_FORMAT_R32_SSCALED,
   PIPE_FORMAT_R32G32_SSCALED,
   PIPE_FORMAT_R32G32B32_SSCALED,
   PIPE_FORMAT_R32G32B32A32_SSCALED
};

static GLuint int_types_int[4] = {
   PIPE_FORMAT_R32_SINT,
   PIPE_FORMAT_R32G32_SINT,
   PIPE_FORMAT_R32G32B32_SINT,
   PIPE_FORMAT_R32G32B32A32_SINT
};

static GLuint ushort_types_norm[4] = {
   PIPE_FORMAT_R16_UNORM,
   PIPE_FORMAT_R16G16_UNORM,
   PIPE_FORMAT_R16G16B16_UNORM,
   PIPE_FORMAT_R16G16B16A16_UNORM
};

static GLuint ushort_types_scale[4] = {
   PIPE_FORMAT_R16_USCALED,
   PIPE_FORMAT_R16G16_USCALED,
   PIPE_FORMAT_R16G16B16_USCALED,
   PIPE_FORMAT_R16G16B16A16_USCALED
};

static GLuint ushort_types_int[4] = {
   PIPE_FORMAT_R16_UINT,
   PIPE_FORMAT_R16G16_UINT,
   PIPE_FORMAT_R16G16B16_UINT,
   PIPE_FORMAT_R16G16B16A16_UINT
};

static GLuint short_types_norm[4] = {
   PIPE_FORMAT_R16_SNORM,
   PIPE_FORMAT_R16G16_SNORM,
   PIPE_FORMAT_R16G16B16_SNORM,
   PIPE_FORMAT_R16G16B16A16_SNORM
};

static GLuint short_types_scale[4] = {
   PIPE_FORMAT_R16_SSCALED,
   PIPE_FORMAT_R16G16_SSCALED,
   PIPE_FORMAT_R16G16B16_SSCALED,
   PIPE_FORMAT_R16G16B16A16_SSCALED
};

static GLuint short_types_int[4] = {
   PIPE_FORMAT_R16_SINT,
   PIPE_FORMAT_R16G16_SINT,
   PIPE_FORMAT_R16G16B16_SINT,
   PIPE_FORMAT_R16G16B16A16_SINT
};

static GLuint ubyte_types_norm[4] = {
   PIPE_FORMAT_R8_UNORM,
   PIPE_FORMAT_R8G8_UNORM,
   PIPE_FORMAT_R8G8B8_UNORM,
   PIPE_FORMAT_R8G8B8A8_UNORM
};

static GLuint ubyte_types_scale[4] = {
   PIPE_FORMAT_R8_USCALED,
   PIPE_FORMAT_R8G8_USCALED,
   PIPE_FORMAT_R8G8B8_USCALED,
   PIPE_FORMAT_R8G8B8A8_USCALED
};

static GLuint ubyte_types_int[4] = {
   PIPE_FORMAT_R8_UINT,
   PIPE_FORMAT_R8G8_UINT,
   PIPE_FORMAT_R8G8B8_UINT,
   PIPE_FORMAT_R8G8B8A8_UINT
};

static GLuint byte_types_norm[4] = {
   PIPE_FORMAT_R8_SNORM,
   PIPE_FORMAT_R8G8_SNORM,
   PIPE_FORMAT_R8G8B8_SNORM,
   PIPE_FORMAT_R8G8B8A8_SNORM
};

static GLuint byte_types_scale[4] = {
   PIPE_FORMAT_R8_SSCALED,
   PIPE_FORMAT_R8G8_SSCALED,
   PIPE_FORMAT_R8G8B8_SSCALED,
   PIPE_FORMAT_R8G8B8A8_SSCALED
};

static GLuint byte_types_int[4] = {
   PIPE_FORMAT_R8_SINT,
   PIPE_FORMAT_R8G8_SINT,
   PIPE_FORMAT_R8G8B8_SINT,
   PIPE_FORMAT_R8G8B8A8_SINT
};

static GLuint fixed_types[4] = {
   PIPE_FORMAT_R32_FIXED,
   PIPE_FORMAT_R32G32_FIXED,
   PIPE_FORMAT_R32G32B32_FIXED,
   PIPE_FORMAT_R32G32B32A32_FIXED
};



/**
 * Return a PIPE_FORMAT_x for the given GL datatype and size.
 */
enum pipe_format
st_pipe_vertex_format(GLenum type, GLuint size, GLenum format,
                      GLboolean normalized, GLboolean integer)
{
   assert((type >= GL_BYTE && type <= GL_DOUBLE) ||
          type == GL_FIXED || type == GL_HALF_FLOAT ||
          type == GL_INT_2_10_10_10_REV ||
          type == GL_UNSIGNED_INT_2_10_10_10_REV);
   assert(size >= 1);
   assert(size <= 4);
   assert(format == GL_RGBA || format == GL_BGRA);

   if (type == GL_INT_2_10_10_10_REV ||
       type == GL_UNSIGNED_INT_2_10_10_10_REV) {
      assert(size == 4);
      assert(!integer);

      if (format == GL_BGRA) {
         if (type == GL_INT_2_10_10_10_REV) {
            if (normalized)
               return PIPE_FORMAT_B10G10R10A2_SNORM;
            else
               return PIPE_FORMAT_B10G10R10A2_SSCALED;
         } else {
            if (normalized)
               return PIPE_FORMAT_B10G10R10A2_UNORM;
            else
               return PIPE_FORMAT_B10G10R10A2_USCALED;
         }
      } else {
         if (type == GL_INT_2_10_10_10_REV) {
            if (normalized)
               return PIPE_FORMAT_R10G10B10A2_SNORM;
            else
               return PIPE_FORMAT_R10G10B10A2_SSCALED;
         } else {
            if (normalized)
               return PIPE_FORMAT_R10G10B10A2_UNORM;
            else
               return PIPE_FORMAT_R10G10B10A2_USCALED;
         }
      }
   }

   if (format == GL_BGRA) {
      /* this is an odd-ball case */
      assert(type == GL_UNSIGNED_BYTE);
      assert(normalized);
      return PIPE_FORMAT_B8G8R8A8_UNORM;
   }

   if (integer) {
      switch (type) {
      case GL_INT: return int_types_int[size-1];
      case GL_SHORT: return short_types_int[size-1];
      case GL_BYTE: return byte_types_int[size-1];
      case GL_UNSIGNED_INT: return uint_types_int[size-1];
      case GL_UNSIGNED_SHORT: return ushort_types_int[size-1];
      case GL_UNSIGNED_BYTE: return ubyte_types_int[size-1];
      default: assert(0); return 0;
      }
   }
   else if (normalized) {
      switch (type) {
      case GL_DOUBLE: return double_types[size-1];
      case GL_FLOAT: return float_types[size-1];
      case GL_HALF_FLOAT: return half_float_types[size-1];
      case GL_INT: return int_types_norm[size-1];
      case GL_SHORT: return short_types_norm[size-1];
      case GL_BYTE: return byte_types_norm[size-1];
      case GL_UNSIGNED_INT: return uint_types_norm[size-1];
      case GL_UNSIGNED_SHORT: return ushort_types_norm[size-1];
      case GL_UNSIGNED_BYTE: return ubyte_types_norm[size-1];
      case GL_FIXED: return fixed_types[size-1];
      default: assert(0); return 0;
      }
   }
   else {
      switch (type) {
      case GL_DOUBLE: return double_types[size-1];
      case GL_FLOAT: return float_types[size-1];
      case GL_HALF_FLOAT: return half_float_types[size-1];
      case GL_INT: return int_types_scale[size-1];
      case GL_SHORT: return short_types_scale[size-1];
      case GL_BYTE: return byte_types_scale[size-1];
      case GL_UNSIGNED_INT: return uint_types_scale[size-1];
      case GL_UNSIGNED_SHORT: return ushort_types_scale[size-1];
      case GL_UNSIGNED_BYTE: return ubyte_types_scale[size-1];
      case GL_FIXED: return fixed_types[size-1];
      default: assert(0); return 0;
      }
   }
   return PIPE_FORMAT_NONE; /* silence compiler warning */
}


/**
 * This is very similar to vbo_all_varyings_in_vbos() but we are
 * only interested in per-vertex data. See bug 38626.
 */
static GLboolean
all_varyings_in_vbos(const struct gl_client_array *arrays[])
{
   GLuint i;

   for (i = 0; i < VERT_ATTRIB_MAX; i++)
      if (arrays[i]->StrideB &&
          !arrays[i]->InstanceDivisor &&
          !_mesa_is_bufferobj(arrays[i]->BufferObj))
         return GL_FALSE;

   return GL_TRUE;
}


/**
 * Examine the active arrays to determine if we have interleaved
 * vertex arrays all living in one VBO, or all living in user space.
 */
static GLboolean
is_interleaved_arrays(const struct st_vertex_program *vp,
                      const struct st_vp_variant *vpv,
                      const struct gl_client_array **arrays)
{
   GLuint attr;
   const struct gl_buffer_object *firstBufObj = NULL;
   GLint firstStride = -1;
   const GLubyte *firstPtr = NULL;
   GLboolean userSpaceBuffer = GL_FALSE;

   for (attr = 0; attr < vpv->num_inputs; attr++) {
      const GLuint mesaAttr = vp->index_to_input[attr];
      const struct gl_client_array *array = arrays[mesaAttr];
      const struct gl_buffer_object *bufObj = array->BufferObj;
      const GLsizei stride = array->StrideB; /* in bytes */

      if (attr == 0) {
         /* save info about the first array */
         firstStride = stride;
         firstPtr = array->Ptr;
         firstBufObj = bufObj;
         userSpaceBuffer = !bufObj || !bufObj->Name;
      }
      else {
         /* check if other arrays interleave with the first, in same buffer */
         if (stride != firstStride)
            return GL_FALSE; /* strides don't match */

         if (bufObj != firstBufObj)
            return GL_FALSE; /* arrays in different VBOs */

         if (abs(array->Ptr - firstPtr) > firstStride)
            return GL_FALSE; /* arrays start too far apart */

         if ((!_mesa_is_bufferobj(bufObj)) != userSpaceBuffer)
            return GL_FALSE; /* mix of VBO and user-space arrays */
      }
   }

   return GL_TRUE;
}


/**
 * Set up for drawing interleaved arrays that all live in one VBO
 * or all live in user space.
 * \param vbuffer returns vertex buffer info
 * \param velements returns vertex element info
 * \return GL_TRUE for success, GL_FALSE otherwise (probably out of memory)
 */
static GLboolean
setup_interleaved_attribs(struct gl_context *ctx,
                          const struct st_vertex_program *vp,
                          const struct st_vp_variant *vpv,
                          const struct gl_client_array **arrays,
                          struct pipe_vertex_buffer *vbuffer,
                          struct pipe_vertex_element velements[])
{
   GLuint attr;
   const GLubyte *low_addr = NULL;
   GLboolean usingVBO; /* all arrays in a VBO? */
   struct gl_buffer_object *bufobj;
   GLsizei stride;
   /* Find the lowest address of the arrays we're drawing and
    * init bufobj and stride.
    */
   if (vpv->num_inputs) {
      const GLuint mesaAttr0 = vp->index_to_input[0];
      const struct gl_client_array *array = arrays[mesaAttr0];

      /* Since we're doing interleaved arrays, we know there'll be at most
       * one buffer object and the stride will be the same for all arrays.
       * Grab them now.
       */
      bufobj = array->BufferObj;
      stride = array->StrideB;

      low_addr = arrays[vp->index_to_input[0]]->Ptr;

      for (attr = 1; attr < vpv->num_inputs; attr++) {
         const GLubyte *start = arrays[vp->index_to_input[attr]]->Ptr;
         low_addr = MIN2(low_addr, start);
      }
   }
   else {
      /* not sure we'll ever have zero inputs, but play it safe */
      bufobj = NULL;
      stride = 0;
      low_addr = 0;
   }

   /* true if the arrays live in a VBO rather than in user space */
   usingVBO = _mesa_is_bufferobj(bufobj);

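   /* Build a pipe_vertex_element for each vertex shader input. All
    * elements reference vertex buffer 0 since the data is interleaved.
    */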
   for (attr = 0; attr < vpv->num_inputs; attr++) {
      const GLuint mesaAttr = vp->index_to_input[attr];
      const struct gl_client_array *array = arrays[mesaAttr];
      unsigned src_offset = (unsigned) (array->Ptr - low_addr);
      GLuint element_size = array->_ElementSize;

      assert(element_size == array->Size * _mesa_sizeof_type(array->Type));

      velements[attr].src_offset = src_offset;
      velements[attr].instance_divisor = array->InstanceDivisor;
      velements[attr].vertex_buffer_index = 0;
      velements[attr].src_format = st_pipe_vertex_format(array->Type,
                                                         array->Size,
                                                         array->Format,
                                                         array->Normalized,
                                                         array->Integer);
      assert(velements[attr].src_format);
   }

   /*
    * Return the vbuffer info and setup user-space attrib info, if needed.
    */
   if (vpv->num_inputs == 0) {
      /* just defensive coding here */
      vbuffer->buffer = NULL;
      vbuffer->user_buffer = NULL;
      vbuffer->buffer_offset = 0;
      vbuffer->stride = 0;
   }
   else if (usingVBO) {
      /* all interleaved arrays in a VBO */
      struct st_buffer_object *stobj = st_buffer_object(bufobj);

      if (!stobj || !stobj->buffer) {
         /* probably out of memory (or zero-sized buffer) */
         return GL_FALSE;
      }

      vbuffer->buffer = stobj->buffer;
      vbuffer->user_buffer = NULL;
      vbuffer->buffer_offset = pointer_to_offset(low_addr);
      vbuffer->stride = stride;
   }
   else {
      /* all interleaved arrays in user memory */
      vbuffer->buffer = NULL;
      vbuffer->user_buffer = low_addr;
      vbuffer->buffer_offset = 0;
      vbuffer->stride = stride;
   }

   return GL_TRUE;
}


/**
 * Set up a separate pipe_vertex_buffer and pipe_vertex_element for each
 * vertex attribute.
 * \param vbuffer returns vertex buffer info
 * \param velements returns vertex element info
 * \return GL_TRUE for success, GL_FALSE otherwise (probably out of memory)
 */
static GLboolean
setup_non_interleaved_attribs(struct gl_context *ctx,
                              const struct st_vertex_program *vp,
                              const struct st_vp_variant *vpv,
                              const struct gl_client_array **arrays,
                              struct pipe_vertex_buffer vbuffer[],
                              struct pipe_vertex_element velements[])
{
   GLuint attr;

   for (attr = 0; attr < vpv->num_inputs; attr++) {
      const GLuint mesaAttr = vp->index_to_input[attr];
      const struct gl_client_array *array = arrays[mesaAttr];
      struct gl_buffer_object *bufobj = array->BufferObj;
      GLsizei stride = array->StrideB;

      assert(array->_ElementSize == array->Size * _mesa_sizeof_type(array->Type));

      if (_mesa_is_bufferobj(bufobj)) {
         /* Attribute data is in a VBO.
          * Recall that for VBOs, the gl_client_array->Ptr field is
          * really an offset from the start of the VBO, not a pointer.
          */
         struct st_buffer_object *stobj = st_buffer_object(bufobj);

         if (!stobj || !stobj->buffer) {
            /* probably out of memory (or zero-sized buffer) */
            return GL_FALSE;
         }

         vbuffer[attr].buffer = stobj->buffer;
         vbuffer[attr].user_buffer = NULL;
         vbuffer[attr].buffer_offset = pointer_to_offset(array->Ptr);
      }
      else {
         /* wrap user data */
         void *ptr;

         if (array->Ptr) {
            ptr = (void *) array->Ptr;
         }
         else {
            /* no array, use ctx->Current.Attrib[] value */
            ptr = (void *) ctx->Current.Attrib[mesaAttr];
            stride = 0;
         }

         assert(ptr);

         vbuffer[attr].buffer = NULL;
         vbuffer[attr].user_buffer = ptr;
         vbuffer[attr].buffer_offset = 0;
      }

      /* common-case setup */
      vbuffer[attr].stride = stride; /* in bytes */

      velements[attr].src_offset = 0;
      velements[attr].instance_divisor = array->InstanceDivisor;
      velements[attr].vertex_buffer_index = attr;
      velements[attr].src_format = st_pipe_vertex_format(array->Type,
                                                         array->Size,
                                                         array->Format,
                                                         array->Normalized,
                                                         array->Integer);
      assert(velements[attr].src_format);
   }

   return GL_TRUE;
}

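/**
 * Translate the Mesa index buffer into a pipe_index_buffer and bind it
 * with cso_set_index_buffer(). Indices in user memory are either uploaded
 * to a real buffer (via st->indexbuf_uploader) or passed through as a
 * user pointer.
 */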
static void
setup_index_buffer(struct st_context *st,
                   const struct _mesa_index_buffer *ib,
                   struct pipe_index_buffer *ibuffer)
{
   struct gl_buffer_object *bufobj = ib->obj;

   ibuffer->index_size = vbo_sizeof_ib_type(ib->type);

   /* get/create the index buffer object */
   if (_mesa_is_bufferobj(bufobj)) {
      /* indices are in a real VBO */
      ibuffer->buffer = st_buffer_object(bufobj)->buffer;
      ibuffer->offset = pointer_to_offset(ib->ptr);
   }
   else if (st->indexbuf_uploader) {
      u_upload_data(st->indexbuf_uploader, 0, ib->count * ibuffer->index_size,
                    ib->ptr, &ibuffer->offset, &ibuffer->buffer);
   }
   else {
      /* indices are in user space memory */
      ibuffer->user_buffer = ib->ptr;
   }

   cso_set_index_buffer(st->cso_context, ibuffer);
}


/**
 * Prior to drawing, check that any uniforms referenced by the
 * current shader have been set. If a uniform has not been set,
 * issue a warning.
 */
static void
check_uniforms(struct gl_context *ctx)
{
   struct gl_shader_program *shProg[3] = {
      ctx->Shader.CurrentVertexProgram,
      ctx->Shader.CurrentGeometryProgram,
      ctx->Shader.CurrentFragmentProgram,
   };
   unsigned j;

   for (j = 0; j < 3; j++) {
      unsigned i;

      if (shProg[j] == NULL || !shProg[j]->LinkStatus)
         continue;

      for (i = 0; i < shProg[j]->NumUserUniformStorage; i++) {
         const struct gl_uniform_storage *u = &shProg[j]->UniformStorage[i];
         if (!u->initialized) {
            _mesa_warning(ctx,
                          "Using shader with uninitialized uniform: %s",
                          u->name);
         }
      }
   }
}


/*
 * Notes on primitive restart:
 * The code below is used when the gallium driver does not support primitive
 * restart itself. We map the index buffer, find the restart indexes, unmap
 * the index buffer then draw the sub-primitives delineated by the restarts.
 * A couple possible optimizations:
 * 1. Save the list of sub-primitive (start, count) values in a list attached
 *    to the index buffer for re-use in subsequent draws. The list would be
 *    invalidated when the contents of the buffer changed.
 * 2. If drawing triangle strips or quad strips, create a new index buffer
 *    that uses duplicated vertices to render the disjoint strips as one
 *    long strip. We'd have to be careful to avoid using too much memory
 *    for this.
 * Finally, some apps might perform better if they don't use primitive restart
 * at all rather than this fallback path. Set MESA_EXTENSION_OVERRIDE to
 * "-GL_NV_primitive_restart" to test that.
 */


struct sub_primitive
{
   unsigned start, count;
};


/**
 * Scan the elements array to find restart indexes. Return a list
 * of primitive (start,count) pairs to indicate how to draw the sub-
 * primitives delineated by the restart index.
 */
static struct sub_primitive *
find_sub_primitives(const void *elements, unsigned element_size,
                    unsigned start, unsigned end, unsigned restart_index,
                    unsigned *num_sub_prims)
{
   const unsigned max_prims = end - start;
   struct sub_primitive *sub_prims;
   unsigned i, cur_start, cur_count, num;

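   /* Allocate for the worst case: one sub-primitive per index element. */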
   sub_prims = (struct sub_primitive *)
      malloc(max_prims * sizeof(struct sub_primitive));

   if (!sub_prims) {
      *num_sub_prims = 0;
      return NULL;
   }

   cur_start = start;
   cur_count = 0;
   num = 0;

#define SCAN_ELEMENTS(TYPE) \
   for (i = start; i < end; i++) { \
      if (((const TYPE *) elements)[i] == restart_index) { \
         if (cur_count > 0) { \
            assert(num < max_prims); \
            sub_prims[num].start = cur_start; \
            sub_prims[num].count = cur_count; \
            num++; \
         } \
         cur_start = i + 1; \
         cur_count = 0; \
      } \
      else { \
         cur_count++; \
      } \
   } \
   if (cur_count > 0) { \
      assert(num < max_prims); \
      sub_prims[num].start = cur_start; \
      sub_prims[num].count = cur_count; \
      num++; \
   }

   switch (element_size) {
   case 1:
      SCAN_ELEMENTS(ubyte);
      break;
   case 2:
      SCAN_ELEMENTS(ushort);
      break;
   case 4:
      SCAN_ELEMENTS(uint);
      break;
   default:
      assert(0 && "bad index_size in find_sub_primitives()");
   }

#undef SCAN_ELEMENTS

   *num_sub_prims = num;

   return sub_prims;
}


/**
 * For gallium drivers that don't support the primitive restart
 * feature, handle it here by breaking up the indexed primitive into
 * sub-primitives.
 */
static void
handle_fallback_primitive_restart(struct cso_context *cso,
                                  struct pipe_context *pipe,
                                  const struct _mesa_index_buffer *ib,
                                  struct pipe_index_buffer *ibuffer,
                                  struct pipe_draw_info *orig_info)
{
   const unsigned start = orig_info->start;
   const unsigned count = orig_info->count;
   struct pipe_draw_info info = *orig_info;
   struct pipe_transfer *transfer = NULL;
   unsigned instance, i;
   const void *ptr = NULL;
   struct sub_primitive *sub_prims;
   unsigned num_sub_prims;

   assert(info.indexed);
   assert(ibuffer->buffer || ibuffer->user_buffer);
   assert(ib);
   if ((!ibuffer->buffer && !ibuffer->user_buffer) || !ib)
      return;

   info.primitive_restart = FALSE;
   info.instance_count = 1;

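   /* Map the index buffer (if the indices live in a real VBO) so we can
    * scan it for restart indexes.
    */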
   if (_mesa_is_bufferobj(ib->obj)) {
      ptr = pipe_buffer_map_range(pipe, ibuffer->buffer,
                                  start * ibuffer->index_size, /* start */
                                  count * ibuffer->index_size, /* length */
                                  PIPE_TRANSFER_READ, &transfer);
      if (!ptr)
         return;

      ptr = (uint8_t*)ptr + (ibuffer->offset - start * ibuffer->index_size);
   }
   else {
      ptr = ib->ptr;
      if (!ptr)
         return;
   }

   sub_prims = find_sub_primitives(ptr, ibuffer->index_size,
                                   0, count, orig_info->restart_index,
                                   &num_sub_prims);

   if (transfer)
      pipe_buffer_unmap(pipe, transfer);

   /* Now draw the sub primitives.
    * Need to loop over instances as well to preserve draw order.
    */
   for (instance = 0; instance < orig_info->instance_count; instance++) {
      info.start_instance = instance + orig_info->start_instance;
      for (i = 0; i < num_sub_prims; i++) {
         info.start = sub_prims[i].start;
         info.count = sub_prims[i].count;
         if (u_trim_pipe_prim(info.mode, &info.count)) {
            cso_draw_vbo(cso, &info);
         }
      }
   }

   if (sub_prims)
      free(sub_prims);
}


/**
 * Translate OpenGL primitive type (GL_POINTS, GL_TRIANGLE_STRIP, etc) to
 * the corresponding Gallium type.
 */
static unsigned
translate_prim(const struct gl_context *ctx, unsigned prim)
{
   /* GL prims should match Gallium prims, spot-check a few */
   assert(GL_POINTS == PIPE_PRIM_POINTS);
   assert(GL_QUADS == PIPE_PRIM_QUADS);
   assert(GL_TRIANGLE_STRIP_ADJACENCY == PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY);

   /* Avoid quadstrips if it's easy to do so:
    * Note: it's important to do the correct trimming if we change the
    * prim type! We do that wherever this function is called.
    */
   if (prim == GL_QUAD_STRIP &&
       ctx->Light.ShadeModel != GL_FLAT &&
       ctx->Polygon.FrontMode == GL_FILL &&
       ctx->Polygon.BackMode == GL_FILL)
      prim = GL_TRIANGLE_STRIP;

   return prim;
}


/**
 * Setup vertex arrays and buffers prior to drawing.
 * \return GL_TRUE for success, GL_FALSE otherwise (probably out of memory)
 */
static GLboolean
st_validate_varrays(struct gl_context *ctx,
                    const struct gl_client_array **arrays)
{
   struct st_context *st = st_context(ctx);
   const struct st_vertex_program *vp;
   const struct st_vp_variant *vpv;
   struct pipe_vertex_buffer vbuffer[PIPE_MAX_SHADER_INPUTS];
   struct pipe_vertex_element velements[PIPE_MAX_ATTRIBS];
   unsigned num_vbuffers, num_velements;

   /* must get these after state validation! */
   vp = st->vp;
   vpv = st->vp_variant;

   memset(velements, 0, sizeof(struct pipe_vertex_element) * vpv->num_inputs);

   /*
    * Setup the vbuffer[] and velements[] arrays.
    */
   if (is_interleaved_arrays(vp, vpv, arrays)) {
      if (!setup_interleaved_attribs(ctx, vp, vpv, arrays, vbuffer,
                                     velements)) {
         return GL_FALSE;
      }

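      /* All inputs share a single interleaved vertex buffer (none at all
       * if there are no inputs).
       */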
      num_vbuffers = 1;
      num_velements = vpv->num_inputs;
      if (num_velements == 0)
         num_vbuffers = 0;
   }
   else {
      if (!setup_non_interleaved_attribs(ctx, vp, vpv, arrays,
                                         vbuffer, velements)) {
         return GL_FALSE;
      }

      num_vbuffers = vpv->num_inputs;
      num_velements = vpv->num_inputs;
   }

   cso_set_vertex_buffers(st->cso_context, num_vbuffers, vbuffer);
   cso_set_vertex_elements(st->cso_context, num_velements, velements);

   return GL_TRUE;
}


/**
 * This function gets plugged into the VBO module and is called when
 * we have something to render.
 * Basically, translate the information into the format expected by gallium.
 */
void
st_draw_vbo(struct gl_context *ctx,
            const struct gl_client_array **arrays,
            const struct _mesa_prim *prims,
            GLuint nr_prims,
            const struct _mesa_index_buffer *ib,
            GLboolean index_bounds_valid,
            GLuint min_index,
            GLuint max_index,
            struct gl_transform_feedback_object *tfb_vertcount)
{
   struct st_context *st = st_context(ctx);
   struct pipe_context *pipe = st->pipe;
   struct pipe_index_buffer ibuffer = {0};
   struct pipe_draw_info info;
   unsigned i;
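   /* True if vertex array, program or buffer object state has changed and
    * the vertex buffers/elements must be revalidated below.
    */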
   GLboolean new_array =
      st->dirty.st &&
      (st->dirty.mesa & (_NEW_ARRAY | _NEW_PROGRAM | _NEW_BUFFER_OBJECT)) != 0;

   /* Mesa core state should have been validated already */
   assert(ctx->NewState == 0x0);

   /* Validate state. */
   if (st->dirty.st) {
      GLboolean vertDataEdgeFlags;

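      /* Track whether edge flags are supplied as per-vertex data in a VBO;
       * if that changed, flag the edge-flag state as dirty.
       */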
      vertDataEdgeFlags = arrays[VERT_ATTRIB_EDGEFLAG]->BufferObj &&
                          arrays[VERT_ATTRIB_EDGEFLAG]->BufferObj->Name;
      if (vertDataEdgeFlags != st->vertdata_edgeflags) {
         st->vertdata_edgeflags = vertDataEdgeFlags;
         st->dirty.st |= ST_NEW_EDGEFLAGS_DATA;
      }

      st_validate_state(st);

      if (new_array) {
         if (!st_validate_varrays(ctx, arrays)) {
            /* probably out of memory, no-op the draw call */
            return;
         }
      }

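      /* Optionally warn about shaders with uninitialized uniforms;
       * disabled by default.
       */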
#if 0
      if (MESA_VERBOSE & VERBOSE_GLSL) {
         check_uniforms(ctx);
      }
#else
      (void) check_uniforms;
#endif
   }

   util_draw_init_info(&info);
   if (ib) {
      /* Get index bounds for user buffers. */
      if (!index_bounds_valid)
         if (!all_varyings_in_vbos(arrays))
            vbo_get_minmax_indices(ctx, prims, ib, &min_index, &max_index,
                                   nr_prims);

      setup_index_buffer(st, ib, &ibuffer);

      info.indexed = TRUE;
      if (min_index != ~0 && max_index != ~0) {
         info.min_index = min_index;
         info.max_index = max_index;
      }

      /* The VBO module handles restart for the non-indexed glDrawArrays
       * so we only set these fields for indexed drawing:
       */
      info.primitive_restart = ctx->Array.PrimitiveRestart;
      info.restart_index = ctx->Array.RestartIndex;
   }
   else {
      /* Transform feedback drawing is always non-indexed. */
      /* Set info.count_from_stream_output. */
      if (tfb_vertcount) {
         st_transform_feedback_draw_init(tfb_vertcount, &info);
      }
   }

   /* do actual drawing */
   for (i = 0; i < nr_prims; i++) {
      info.mode = translate_prim( ctx, prims[i].mode );
      info.start = prims[i].start;
      info.count = prims[i].count;
      info.instance_count = prims[i].num_instances;
      info.index_bias = prims[i].basevertex;
      if (!ib) {
         info.min_index = info.start;
         info.max_index = info.start + info.count - 1;
      }

      if (info.count_from_stream_output) {
         cso_draw_vbo(st->cso_context, &info);
      }
      else if (info.primitive_restart) {
         if (st->sw_primitive_restart) {
            /* Handle primitive restart for drivers that don't support it */
            handle_fallback_primitive_restart(st->cso_context, pipe, ib,
                                              &ibuffer, &info);
         }
         else {
            /* don't trim, restarts might be inside index list */
            cso_draw_vbo(st->cso_context, &info);
         }
      }
      else if (u_trim_pipe_prim(info.mode, &info.count))
         cso_draw_vbo(st->cso_context, &info);
   }

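   /* Release the buffer reference created by u_upload_data() for
    * user-space indices, if any.
    */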
   if (ib && st->indexbuf_uploader && !_mesa_is_bufferobj(ib->obj)) {
      pipe_resource_reference(&ibuffer.buffer, NULL);
   }
}


void
st_init_draw(struct st_context *st)
{
   struct gl_context *ctx = st->ctx;

   vbo_set_draw_func(ctx, st_draw_vbo);

#if FEATURE_feedback || FEATURE_rastpos
   st->draw = draw_create(st->pipe); /* for selection/feedback */

   /* Disable draw options that might convert points/lines to tris, etc.
    * as that would foul-up feedback/selection mode.
    */
   draw_wide_line_threshold(st->draw, 1000.0f);
   draw_wide_point_threshold(st->draw, 1000.0f);
   draw_enable_line_stipple(st->draw, FALSE);
   draw_enable_point_sprites(st->draw, FALSE);
#endif
}


void
st_destroy_draw(struct st_context *st)
{
#if FEATURE_feedback || FEATURE_rastpos
   draw_destroy(st->draw);
#endif
}