svga: check for and skip null vertex buffer pointers
[mesa.git] / src / mesa / state_tracker / st_draw.c
1 /**************************************************************************
2 *
3 * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28 /*
29 * This file implements the st_draw_vbo() function which is called from
30 * Mesa's VBO module. All point/line/triangle rendering is done through
31 * this function whether the user called glBegin/End, glDrawArrays,
32 * glDrawElements, glEvalMesh, or glCallList, etc.
33 *
34 * We basically convert the VBO's vertex attribute/array information into
35 * Gallium vertex state, bind the vertex buffer objects and call
36 * pipe->draw_vbo().
37 *
38 * Authors:
39 * Keith Whitwell <keith@tungstengraphics.com>
40 */
41
42
43 #include "main/imports.h"
44 #include "main/image.h"
45 #include "main/bufferobj.h"
46 #include "main/macros.h"
47 #include "main/mfeatures.h"
48
49 #include "vbo/vbo.h"
50
51 #include "st_context.h"
52 #include "st_atom.h"
53 #include "st_cb_bufferobjects.h"
54 #include "st_cb_xformfb.h"
55 #include "st_draw.h"
56 #include "st_program.h"
57
58 #include "pipe/p_context.h"
59 #include "pipe/p_defines.h"
60 #include "util/u_inlines.h"
61 #include "util/u_format.h"
62 #include "util/u_prim.h"
63 #include "util/u_draw_quad.h"
64 #include "util/u_upload_mgr.h"
65 #include "draw/draw_context.h"
66 #include "cso_cache/cso_context.h"
67
68 #include "../glsl/ir_uniform.h"
69
70
71 static GLuint double_types[4] = {
72 PIPE_FORMAT_R64_FLOAT,
73 PIPE_FORMAT_R64G64_FLOAT,
74 PIPE_FORMAT_R64G64B64_FLOAT,
75 PIPE_FORMAT_R64G64B64A64_FLOAT
76 };
77
78 static GLuint float_types[4] = {
79 PIPE_FORMAT_R32_FLOAT,
80 PIPE_FORMAT_R32G32_FLOAT,
81 PIPE_FORMAT_R32G32B32_FLOAT,
82 PIPE_FORMAT_R32G32B32A32_FLOAT
83 };
84
85 static GLuint half_float_types[4] = {
86 PIPE_FORMAT_R16_FLOAT,
87 PIPE_FORMAT_R16G16_FLOAT,
88 PIPE_FORMAT_R16G16B16_FLOAT,
89 PIPE_FORMAT_R16G16B16A16_FLOAT
90 };
91
92 static GLuint uint_types_norm[4] = {
93 PIPE_FORMAT_R32_UNORM,
94 PIPE_FORMAT_R32G32_UNORM,
95 PIPE_FORMAT_R32G32B32_UNORM,
96 PIPE_FORMAT_R32G32B32A32_UNORM
97 };
98
99 static GLuint uint_types_scale[4] = {
100 PIPE_FORMAT_R32_USCALED,
101 PIPE_FORMAT_R32G32_USCALED,
102 PIPE_FORMAT_R32G32B32_USCALED,
103 PIPE_FORMAT_R32G32B32A32_USCALED
104 };
105
106 static GLuint uint_types_int[4] = {
107 PIPE_FORMAT_R32_UINT,
108 PIPE_FORMAT_R32G32_UINT,
109 PIPE_FORMAT_R32G32B32_UINT,
110 PIPE_FORMAT_R32G32B32A32_UINT
111 };
112
113 static GLuint int_types_norm[4] = {
114 PIPE_FORMAT_R32_SNORM,
115 PIPE_FORMAT_R32G32_SNORM,
116 PIPE_FORMAT_R32G32B32_SNORM,
117 PIPE_FORMAT_R32G32B32A32_SNORM
118 };
119
120 static GLuint int_types_scale[4] = {
121 PIPE_FORMAT_R32_SSCALED,
122 PIPE_FORMAT_R32G32_SSCALED,
123 PIPE_FORMAT_R32G32B32_SSCALED,
124 PIPE_FORMAT_R32G32B32A32_SSCALED
125 };
126
127 static GLuint int_types_int[4] = {
128 PIPE_FORMAT_R32_SINT,
129 PIPE_FORMAT_R32G32_SINT,
130 PIPE_FORMAT_R32G32B32_SINT,
131 PIPE_FORMAT_R32G32B32A32_SINT
132 };
133
134 static GLuint ushort_types_norm[4] = {
135 PIPE_FORMAT_R16_UNORM,
136 PIPE_FORMAT_R16G16_UNORM,
137 PIPE_FORMAT_R16G16B16_UNORM,
138 PIPE_FORMAT_R16G16B16A16_UNORM
139 };
140
141 static GLuint ushort_types_scale[4] = {
142 PIPE_FORMAT_R16_USCALED,
143 PIPE_FORMAT_R16G16_USCALED,
144 PIPE_FORMAT_R16G16B16_USCALED,
145 PIPE_FORMAT_R16G16B16A16_USCALED
146 };
147
148 static GLuint ushort_types_int[4] = {
149 PIPE_FORMAT_R16_UINT,
150 PIPE_FORMAT_R16G16_UINT,
151 PIPE_FORMAT_R16G16B16_UINT,
152 PIPE_FORMAT_R16G16B16A16_UINT
153 };
154
155 static GLuint short_types_norm[4] = {
156 PIPE_FORMAT_R16_SNORM,
157 PIPE_FORMAT_R16G16_SNORM,
158 PIPE_FORMAT_R16G16B16_SNORM,
159 PIPE_FORMAT_R16G16B16A16_SNORM
160 };
161
162 static GLuint short_types_scale[4] = {
163 PIPE_FORMAT_R16_SSCALED,
164 PIPE_FORMAT_R16G16_SSCALED,
165 PIPE_FORMAT_R16G16B16_SSCALED,
166 PIPE_FORMAT_R16G16B16A16_SSCALED
167 };
168
169 static GLuint short_types_int[4] = {
170 PIPE_FORMAT_R16_SINT,
171 PIPE_FORMAT_R16G16_SINT,
172 PIPE_FORMAT_R16G16B16_SINT,
173 PIPE_FORMAT_R16G16B16A16_SINT
174 };
175
176 static GLuint ubyte_types_norm[4] = {
177 PIPE_FORMAT_R8_UNORM,
178 PIPE_FORMAT_R8G8_UNORM,
179 PIPE_FORMAT_R8G8B8_UNORM,
180 PIPE_FORMAT_R8G8B8A8_UNORM
181 };
182
183 static GLuint ubyte_types_scale[4] = {
184 PIPE_FORMAT_R8_USCALED,
185 PIPE_FORMAT_R8G8_USCALED,
186 PIPE_FORMAT_R8G8B8_USCALED,
187 PIPE_FORMAT_R8G8B8A8_USCALED
188 };
189
190 static GLuint ubyte_types_int[4] = {
191 PIPE_FORMAT_R8_UINT,
192 PIPE_FORMAT_R8G8_UINT,
193 PIPE_FORMAT_R8G8B8_UINT,
194 PIPE_FORMAT_R8G8B8A8_UINT
195 };
196
197 static GLuint byte_types_norm[4] = {
198 PIPE_FORMAT_R8_SNORM,
199 PIPE_FORMAT_R8G8_SNORM,
200 PIPE_FORMAT_R8G8B8_SNORM,
201 PIPE_FORMAT_R8G8B8A8_SNORM
202 };
203
204 static GLuint byte_types_scale[4] = {
205 PIPE_FORMAT_R8_SSCALED,
206 PIPE_FORMAT_R8G8_SSCALED,
207 PIPE_FORMAT_R8G8B8_SSCALED,
208 PIPE_FORMAT_R8G8B8A8_SSCALED
209 };
210
211 static GLuint byte_types_int[4] = {
212 PIPE_FORMAT_R8_SINT,
213 PIPE_FORMAT_R8G8_SINT,
214 PIPE_FORMAT_R8G8B8_SINT,
215 PIPE_FORMAT_R8G8B8A8_SINT
216 };
217
218 static GLuint fixed_types[4] = {
219 PIPE_FORMAT_R32_FIXED,
220 PIPE_FORMAT_R32G32_FIXED,
221 PIPE_FORMAT_R32G32B32_FIXED,
222 PIPE_FORMAT_R32G32B32A32_FIXED
223 };
224
225
226
227 /**
228 * Return a PIPE_FORMAT_x for the given GL datatype and size.
229 */
230 enum pipe_format
231 st_pipe_vertex_format(GLenum type, GLuint size, GLenum format,
232 GLboolean normalized, GLboolean integer)
233 {
234 assert((type >= GL_BYTE && type <= GL_DOUBLE) ||
235 type == GL_FIXED || type == GL_HALF_FLOAT ||
236 type == GL_INT_2_10_10_10_REV ||
237 type == GL_UNSIGNED_INT_2_10_10_10_REV);
238 assert(size >= 1);
239 assert(size <= 4);
240 assert(format == GL_RGBA || format == GL_BGRA);
241
242 if (type == GL_INT_2_10_10_10_REV ||
243 type == GL_UNSIGNED_INT_2_10_10_10_REV) {
244 assert(size == 4);
245 assert(!integer);
246
247 if (format == GL_BGRA) {
248 if (type == GL_INT_2_10_10_10_REV) {
249 if (normalized)
250 return PIPE_FORMAT_B10G10R10A2_SNORM;
251 else
252 return PIPE_FORMAT_B10G10R10A2_SSCALED;
253 } else {
254 if (normalized)
255 return PIPE_FORMAT_B10G10R10A2_UNORM;
256 else
257 return PIPE_FORMAT_B10G10R10A2_USCALED;
258 }
259 } else {
260 if (type == GL_INT_2_10_10_10_REV) {
261 if (normalized)
262 return PIPE_FORMAT_R10G10B10A2_SNORM;
263 else
264 return PIPE_FORMAT_R10G10B10A2_SSCALED;
265 } else {
266 if (normalized)
267 return PIPE_FORMAT_R10G10B10A2_UNORM;
268 else
269 return PIPE_FORMAT_R10G10B10A2_USCALED;
270 }
271 }
272 }
273
274 if (format == GL_BGRA) {
275 /* this is an odd-ball case */
276 assert(type == GL_UNSIGNED_BYTE);
277 assert(normalized);
278 return PIPE_FORMAT_B8G8R8A8_UNORM;
279 }
280
281 if (integer) {
282 switch (type) {
283 case GL_INT: return int_types_int[size-1];
284 case GL_SHORT: return short_types_int[size-1];
285 case GL_BYTE: return byte_types_int[size-1];
286 case GL_UNSIGNED_INT: return uint_types_int[size-1];
287 case GL_UNSIGNED_SHORT: return ushort_types_int[size-1];
288 case GL_UNSIGNED_BYTE: return ubyte_types_int[size-1];
289 default: assert(0); return 0;
290 }
291 }
292 else if (normalized) {
293 switch (type) {
294 case GL_DOUBLE: return double_types[size-1];
295 case GL_FLOAT: return float_types[size-1];
296 case GL_HALF_FLOAT: return half_float_types[size-1];
297 case GL_INT: return int_types_norm[size-1];
298 case GL_SHORT: return short_types_norm[size-1];
299 case GL_BYTE: return byte_types_norm[size-1];
300 case GL_UNSIGNED_INT: return uint_types_norm[size-1];
301 case GL_UNSIGNED_SHORT: return ushort_types_norm[size-1];
302 case GL_UNSIGNED_BYTE: return ubyte_types_norm[size-1];
303 case GL_FIXED: return fixed_types[size-1];
304 default: assert(0); return 0;
305 }
306 }
307 else {
308 switch (type) {
309 case GL_DOUBLE: return double_types[size-1];
310 case GL_FLOAT: return float_types[size-1];
311 case GL_HALF_FLOAT: return half_float_types[size-1];
312 case GL_INT: return int_types_scale[size-1];
313 case GL_SHORT: return short_types_scale[size-1];
314 case GL_BYTE: return byte_types_scale[size-1];
315 case GL_UNSIGNED_INT: return uint_types_scale[size-1];
316 case GL_UNSIGNED_SHORT: return ushort_types_scale[size-1];
317 case GL_UNSIGNED_BYTE: return ubyte_types_scale[size-1];
318 case GL_FIXED: return fixed_types[size-1];
319 default: assert(0); return 0;
320 }
321 }
322 return PIPE_FORMAT_NONE; /* silence compiler warning */
323 }
324
325
326 /**
327 * This is very similar to vbo_all_varyings_in_vbos() but we are
328 * only interested in per-vertex data. See bug 38626.
329 */
330 static GLboolean
331 all_varyings_in_vbos(const struct gl_client_array *arrays[])
332 {
333 GLuint i;
334
335 for (i = 0; i < VERT_ATTRIB_MAX; i++)
336 if (arrays[i]->StrideB &&
337 !arrays[i]->InstanceDivisor &&
338 !_mesa_is_bufferobj(arrays[i]->BufferObj))
339 return GL_FALSE;
340
341 return GL_TRUE;
342 }
343
344
345 /**
346 * Examine the active arrays to determine if we have interleaved
347 * vertex arrays all living in one VBO, or all living in user space.
348 */
349 static GLboolean
350 is_interleaved_arrays(const struct st_vertex_program *vp,
351 const struct st_vp_variant *vpv,
352 const struct gl_client_array **arrays)
353 {
354 GLuint attr;
355 const struct gl_buffer_object *firstBufObj = NULL;
356 GLint firstStride = -1;
357 const GLubyte *firstPtr = NULL;
358 GLboolean userSpaceBuffer = GL_FALSE;
359
360 for (attr = 0; attr < vpv->num_inputs; attr++) {
361 const GLuint mesaAttr = vp->index_to_input[attr];
362 const struct gl_client_array *array = arrays[mesaAttr];
363 const struct gl_buffer_object *bufObj = array->BufferObj;
364 const GLsizei stride = array->StrideB; /* in bytes */
365
366 if (attr == 0) {
367 /* save info about the first array */
368 firstStride = stride;
369 firstPtr = array->Ptr;
370 firstBufObj = bufObj;
371 userSpaceBuffer = !bufObj || !bufObj->Name;
372 }
373 else {
374 /* check if other arrays interleave with the first, in same buffer */
375 if (stride != firstStride)
376 return GL_FALSE; /* strides don't match */
377
378 if (bufObj != firstBufObj)
379 return GL_FALSE; /* arrays in different VBOs */
380
381 if (abs(array->Ptr - firstPtr) > firstStride)
382 return GL_FALSE; /* arrays start too far apart */
383
384 if ((!_mesa_is_bufferobj(bufObj)) != userSpaceBuffer)
385 return GL_FALSE; /* mix of VBO and user-space arrays */
386 }
387 }
388
389 return GL_TRUE;
390 }
391
392
393 /**
394 * Set up for drawing interleaved arrays that all live in one VBO
395 * or all live in user space.
396 * \param vbuffer returns vertex buffer info
397 * \param velements returns vertex element info
398 * \return GL_TRUE for success, GL_FALSE otherwise (probably out of memory)
399 */
400 static GLboolean
401 setup_interleaved_attribs(struct gl_context *ctx,
402 const struct st_vertex_program *vp,
403 const struct st_vp_variant *vpv,
404 const struct gl_client_array **arrays,
405 struct pipe_vertex_buffer *vbuffer,
406 struct pipe_vertex_element velements[])
407 {
408 GLuint attr;
409 const GLubyte *low_addr = NULL;
410 GLboolean usingVBO; /* all arrays in a VBO? */
411 struct gl_buffer_object *bufobj;
412 GLsizei stride;
413
414 /* Find the lowest address of the arrays we're drawing, and
415 * initialize bufobj and stride.
416 */
417 if (vpv->num_inputs) {
418 const GLuint mesaAttr0 = vp->index_to_input[0];
419 const struct gl_client_array *array = arrays[mesaAttr0];
420
421 /* Since we're doing interleaved arrays, we know there'll be at most
422 * one buffer object and the stride will be the same for all arrays.
423 * Grab them now.
424 */
425 bufobj = array->BufferObj;
426 stride = array->StrideB;
427
428 low_addr = arrays[vp->index_to_input[0]]->Ptr;
429
430 for (attr = 1; attr < vpv->num_inputs; attr++) {
431 const GLubyte *start = arrays[vp->index_to_input[attr]]->Ptr;
432 low_addr = MIN2(low_addr, start);
433 }
434 }
435 else {
436 /* not sure we'll ever have zero inputs, but play it safe */
437 bufobj = NULL;
438 stride = 0;
439 low_addr = 0;
440 }
441
442 /* are the arrays in a buffer object (rather than user space)? */
443 usingVBO = _mesa_is_bufferobj(bufobj);
444
445 for (attr = 0; attr < vpv->num_inputs; attr++) {
446 const GLuint mesaAttr = vp->index_to_input[attr];
447 const struct gl_client_array *array = arrays[mesaAttr];
448 unsigned src_offset = (unsigned) (array->Ptr - low_addr);
449 GLuint element_size = array->_ElementSize;
450
451 assert(element_size == array->Size * _mesa_sizeof_type(array->Type));
452
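      /* All attributes reference vertex buffer 0; each element's src_offset
       * is measured from low_addr, the lowest array address found above.
       */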
453 velements[attr].src_offset = src_offset;
454 velements[attr].instance_divisor = array->InstanceDivisor;
455 velements[attr].vertex_buffer_index = 0;
456 velements[attr].src_format = st_pipe_vertex_format(array->Type,
457 array->Size,
458 array->Format,
459 array->Normalized,
460 array->Integer);
461 assert(velements[attr].src_format);
462 }
463
464 /*
465 * Return the vbuffer info and setup user-space attrib info, if needed.
466 */
467 if (vpv->num_inputs == 0) {
468 /* just defensive coding here */
469 vbuffer->buffer = NULL;
470 vbuffer->user_buffer = NULL;
471 vbuffer->buffer_offset = 0;
472 vbuffer->stride = 0;
473 }
474 else if (usingVBO) {
475 /* all interleaved arrays in a VBO */
476 struct st_buffer_object *stobj = st_buffer_object(bufobj);
477
478 if (!stobj || !stobj->buffer) {
479 /* probably out of memory (or zero-sized buffer) */
480 return GL_FALSE;
481 }
482
483 vbuffer->buffer = stobj->buffer;
484 vbuffer->user_buffer = NULL;
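      /* For a VBO, the array Ptr values (and hence low_addr) are really
       * byte offsets into the buffer object, not pointers.
       */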
485 vbuffer->buffer_offset = pointer_to_offset(low_addr);
486 vbuffer->stride = stride;
487 }
488 else {
489 /* all interleaved arrays in user memory */
490 vbuffer->buffer = NULL;
491 vbuffer->user_buffer = low_addr;
492 vbuffer->buffer_offset = 0;
493 vbuffer->stride = stride;
494 }
495
496 return GL_TRUE;
497 }
498
499
500 /**
501 * Set up a separate pipe_vertex_buffer and pipe_vertex_element for each
502 * vertex attribute.
503 * \param vbuffer returns vertex buffer info
504 * \param velements returns vertex element info
505 * \return GL_TRUE for success, GL_FALSE otherwise (probably out of memory)
506 */
507 static GLboolean
508 setup_non_interleaved_attribs(struct gl_context *ctx,
509 const struct st_vertex_program *vp,
510 const struct st_vp_variant *vpv,
511 const struct gl_client_array **arrays,
512 struct pipe_vertex_buffer vbuffer[],
513 struct pipe_vertex_element velements[])
514 {
515 GLuint attr;
516
517 for (attr = 0; attr < vpv->num_inputs; attr++) {
518 const GLuint mesaAttr = vp->index_to_input[attr];
519 const struct gl_client_array *array = arrays[mesaAttr];
520 struct gl_buffer_object *bufobj = array->BufferObj;
521 GLsizei stride = array->StrideB;
522
523 assert(array->_ElementSize == array->Size * _mesa_sizeof_type(array->Type));
524
525 if (_mesa_is_bufferobj(bufobj)) {
526 /* Attribute data is in a VBO.
527 * Recall that for VBOs, the gl_client_array->Ptr field is
528 * really an offset from the start of the VBO, not a pointer.
529 */
530 struct st_buffer_object *stobj = st_buffer_object(bufobj);
531
532 if (!stobj || !stobj->buffer) {
533 /* probably out of memory (or zero-sized buffer) */
534 return GL_FALSE;
535 }
536
537 vbuffer[attr].buffer = stobj->buffer;
538 vbuffer[attr].user_buffer = NULL;
539 vbuffer[attr].buffer_offset = pointer_to_offset(array->Ptr);
540 }
541 else {
542 /* wrap user data */
543 void *ptr;
544
545 if (array->Ptr) {
546 ptr = (void *) array->Ptr;
547 }
548 else {
549 /* no array, use ctx->Current.Attrib[] value */
550 ptr = (void *) ctx->Current.Attrib[mesaAttr];
551 stride = 0;
552 }
553
554 assert(ptr);
555
556 vbuffer[attr].buffer = NULL;
557 vbuffer[attr].user_buffer = ptr;
558 vbuffer[attr].buffer_offset = 0;
559 }
560
561 /* common-case setup */
562 vbuffer[attr].stride = stride; /* in bytes */
563
564 velements[attr].src_offset = 0;
565 velements[attr].instance_divisor = array->InstanceDivisor;
566 velements[attr].vertex_buffer_index = attr;
567 velements[attr].src_format = st_pipe_vertex_format(array->Type,
568 array->Size,
569 array->Format,
570 array->Normalized,
571 array->Integer);
572 assert(velements[attr].src_format);
573 }
574
575 return GL_TRUE;
576 }
577
578
579 static void
580 setup_index_buffer(struct st_context *st,
581 const struct _mesa_index_buffer *ib,
582 struct pipe_index_buffer *ibuffer)
583 {
584 struct gl_buffer_object *bufobj = ib->obj;
585
586 ibuffer->index_size = vbo_sizeof_ib_type(ib->type);
587
588 /* get/create the index buffer object */
589 if (_mesa_is_bufferobj(bufobj)) {
590 /* indices are in a real VBO */
591 ibuffer->buffer = st_buffer_object(bufobj)->buffer;
592 ibuffer->offset = pointer_to_offset(ib->ptr);
593 }
594 else if (st->indexbuf_uploader) {
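      /* Indices are in user memory: copy them into a temporary hardware
       * buffer with the upload manager.  st_draw_vbo() releases this
       * buffer once the draw is done.
       */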
595 u_upload_data(st->indexbuf_uploader, 0, ib->count * ibuffer->index_size,
596 ib->ptr, &ibuffer->offset, &ibuffer->buffer);
597 u_upload_unmap(st->indexbuf_uploader);
598 }
599 else {
600 /* indices are in user space memory */
601 ibuffer->user_buffer = ib->ptr;
602 }
603
604 cso_set_index_buffer(st->cso_context, ibuffer);
605 }
606
607
608 /**
609 * Prior to drawing, check that any uniforms referenced by the
610 * current shader have been set. If a uniform has not been set,
611 * issue a warning.
612 */
613 static void
614 check_uniforms(struct gl_context *ctx)
615 {
616 struct gl_shader_program *shProg[3] = {
617 ctx->Shader.CurrentVertexProgram,
618 ctx->Shader.CurrentGeometryProgram,
619 ctx->Shader.CurrentFragmentProgram,
620 };
621 unsigned j;
622
623 for (j = 0; j < 3; j++) {
624 unsigned i;
625
626 if (shProg[j] == NULL || !shProg[j]->LinkStatus)
627 continue;
628
629 for (i = 0; i < shProg[j]->NumUserUniformStorage; i++) {
630 const struct gl_uniform_storage *u = &shProg[j]->UniformStorage[i];
631 if (!u->initialized) {
632 _mesa_warning(ctx,
633 "Using shader with uninitialized uniform: %s",
634 u->name);
635 }
636 }
637 }
638 }
639
640
641 /*
642 * Notes on primitive restart:
643 * The code below is used when the gallium driver does not support primitive
644 * restart itself. We map the index buffer, find the restart indexes, unmap
645 * the index buffer then draw the sub-primitives delineated by the restarts.
646 * A couple possible optimizations:
647 * 1. Save the list of sub-primitive (start, count) values in a list attached
648 * to the index buffer for re-use in subsequent draws. The list would be
649 * invalidated when the contents of the buffer changed.
650 * 2. If drawing triangle strips or quad strips, create a new index buffer
651 * that uses duplicated vertices to render the disjoint strips as one
652 * long strip. We'd have to be careful to avoid using too much memory
653 * for this.
654 * Finally, some apps might perform better if they don't use primitive restart
655 * at all rather than this fallback path. Set MESA_EXTENSION_OVERRIDE to
656 * "-GL_NV_primitive_restart" to test that.
657 */
658
659
660 struct sub_primitive
661 {
662 unsigned start, count;
663 };
664
665
666 /**
667 * Scan the elements array to find restart indexes. Return a list
668 * of primitive (start,count) pairs to indicate how to draw the sub-
669 * primitives delineated by the restart index.
670 */
671 static struct sub_primitive *
672 find_sub_primitives(const void *elements, unsigned element_size,
673 unsigned start, unsigned end, unsigned restart_index,
674 unsigned *num_sub_prims)
675 {
676 const unsigned max_prims = end - start;
677 struct sub_primitive *sub_prims;
678 unsigned i, cur_start, cur_count, num;
679
680 sub_prims = (struct sub_primitive *)
681 malloc(max_prims * sizeof(struct sub_primitive));
682
683 if (!sub_prims) {
684 *num_sub_prims = 0;
685 return NULL;
686 }
687
688 cur_start = start;
689 cur_count = 0;
690 num = 0;
691
692 #define SCAN_ELEMENTS(TYPE) \
693 for (i = start; i < end; i++) { \
694 if (((const TYPE *) elements)[i] == restart_index) { \
695 if (cur_count > 0) { \
696 assert(num < max_prims); \
697 sub_prims[num].start = cur_start; \
698 sub_prims[num].count = cur_count; \
699 num++; \
700 } \
701 cur_start = i + 1; \
702 cur_count = 0; \
703 } \
704 else { \
705 cur_count++; \
706 } \
707 } \
708 if (cur_count > 0) { \
709 assert(num < max_prims); \
710 sub_prims[num].start = cur_start; \
711 sub_prims[num].count = cur_count; \
712 num++; \
713 }
714
715 switch (element_size) {
716 case 1:
717 SCAN_ELEMENTS(ubyte);
718 break;
719 case 2:
720 SCAN_ELEMENTS(ushort);
721 break;
722 case 4:
723 SCAN_ELEMENTS(uint);
724 break;
725 default:
726 assert(0 && "bad index_size in find_sub_primitives()");
727 }
728
729 #undef SCAN_ELEMENTS
730
731 *num_sub_prims = num;
732
733 return sub_prims;
734 }
735
736
737 /**
738 * For gallium drivers that don't support the primitive restart
739 * feature, handle it here by breaking up the indexed primitive into
740 * sub-primitives.
741 */
742 static void
743 handle_fallback_primitive_restart(struct cso_context *cso,
744 struct pipe_context *pipe,
745 const struct _mesa_index_buffer *ib,
746 struct pipe_index_buffer *ibuffer,
747 struct pipe_draw_info *orig_info)
748 {
749 const unsigned start = orig_info->start;
750 const unsigned count = orig_info->count;
751 struct pipe_draw_info info = *orig_info;
752 struct pipe_transfer *transfer = NULL;
753 unsigned instance, i;
754 const void *ptr = NULL;
755 struct sub_primitive *sub_prims;
756 unsigned num_sub_prims;
757
758 assert(info.indexed);
759 assert(ibuffer->buffer || ibuffer->user_buffer);
760 assert(ib);
761
762 if ((!ibuffer->buffer && !ibuffer->user_buffer) || !ib)
763 return;
764
765 info.primitive_restart = FALSE;
766 info.instance_count = 1;
767
768 if (_mesa_is_bufferobj(ib->obj)) {
769 ptr = pipe_buffer_map_range(pipe, ibuffer->buffer,
770 start * ibuffer->index_size, /* start */
771 count * ibuffer->index_size, /* length */
772 PIPE_TRANSFER_READ, &transfer);
773 if (!ptr)
774 return;
775
776 ptr = (uint8_t*)ptr + (ibuffer->offset - start * ibuffer->index_size);
777 }
778 else {
779 ptr = ib->ptr;
780 if (!ptr)
781 return;
782 }
783
784 sub_prims = find_sub_primitives(ptr, ibuffer->index_size,
785 0, count, orig_info->restart_index,
786 &num_sub_prims);
787
788 if (transfer)
789 pipe_buffer_unmap(pipe, transfer);
790
791 /* Now draw the sub primitives.
792 * Need to loop over instances as well to preserve draw order.
793 */
794 for (instance = 0; instance < orig_info->instance_count; instance++) {
795 info.start_instance = instance + orig_info->start_instance;
796 for (i = 0; i < num_sub_prims; i++) {
797 info.start = sub_prims[i].start;
798 info.count = sub_prims[i].count;
799 if (u_trim_pipe_prim(info.mode, &info.count)) {
800 cso_draw_vbo(cso, &info);
801 }
802 }
803 }
804
805 if (sub_prims)
806 free(sub_prims);
807 }
808
809
810 /**
811 * Translate an OpenGL primitive type (GL_POINTS, GL_TRIANGLE_STRIP, etc.) to
812 * the corresponding Gallium type.
813 */
814 static unsigned
815 translate_prim(const struct gl_context *ctx, unsigned prim)
816 {
817 /* GL prims should match Gallium prims, spot-check a few */
818 assert(GL_POINTS == PIPE_PRIM_POINTS);
819 assert(GL_QUADS == PIPE_PRIM_QUADS);
820 assert(GL_TRIANGLE_STRIP_ADJACENCY == PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY);
821
822 /* Avoid quadstrips if it's easy to do so:
823 * Note: it's important to do the correct trimming if we change the
824 * prim type! We do that wherever this function is called.
825 */
826 if (prim == GL_QUAD_STRIP &&
827 ctx->Light.ShadeModel != GL_FLAT &&
828 ctx->Polygon.FrontMode == GL_FILL &&
829 ctx->Polygon.BackMode == GL_FILL)
830 prim = GL_TRIANGLE_STRIP;
831
832 return prim;
833 }
834
835
836 /**
837 * Setup vertex arrays and buffers prior to drawing.
838 * \return GL_TRUE for success, GL_FALSE otherwise (probably out of memory)
839 */
840 static GLboolean
841 st_validate_varrays(struct gl_context *ctx,
842 const struct gl_client_array **arrays)
843 {
844 struct st_context *st = st_context(ctx);
845 const struct st_vertex_program *vp;
846 const struct st_vp_variant *vpv;
847 struct pipe_vertex_buffer vbuffer[PIPE_MAX_SHADER_INPUTS];
848 struct pipe_vertex_element velements[PIPE_MAX_ATTRIBS];
849 unsigned num_vbuffers, num_velements;
850
851 /* must get these after state validation! */
852 vp = st->vp;
853 vpv = st->vp_variant;
854
855 memset(velements, 0, sizeof(struct pipe_vertex_element) * vpv->num_inputs);
856
857 /*
858 * Setup the vbuffer[] and velements[] arrays.
859 */
860 if (is_interleaved_arrays(vp, vpv, arrays)) {
861 if (!setup_interleaved_attribs(ctx, vp, vpv, arrays, vbuffer,
862 velements)) {
863 return GL_FALSE;
864 }
865
866 num_vbuffers = 1;
867 num_velements = vpv->num_inputs;
868 if (num_velements == 0)
869 num_vbuffers = 0;
870 }
871 else {
872 if (!setup_non_interleaved_attribs(ctx, vp, vpv, arrays,
873 vbuffer, velements)) {
874 return GL_FALSE;
875 }
876
877 num_vbuffers = vpv->num_inputs;
878 num_velements = vpv->num_inputs;
879 }
880
881 cso_set_vertex_buffers(st->cso_context, num_vbuffers, vbuffer);
882 cso_set_vertex_elements(st->cso_context, num_velements, velements);
883
884 return GL_TRUE;
885 }
886
887
888 /**
889 * This function gets plugged into the VBO module and is called when
890 * we have something to render.
891 * Basically, translate the information into the format expected by gallium.
892 */
893 void
894 st_draw_vbo(struct gl_context *ctx,
895 const struct gl_client_array **arrays,
896 const struct _mesa_prim *prims,
897 GLuint nr_prims,
898 const struct _mesa_index_buffer *ib,
899 GLboolean index_bounds_valid,
900 GLuint min_index,
901 GLuint max_index,
902 struct gl_transform_feedback_object *tfb_vertcount)
903 {
904 struct st_context *st = st_context(ctx);
905 struct pipe_context *pipe = st->pipe;
906 struct pipe_index_buffer ibuffer = {0};
907 struct pipe_draw_info info;
908 unsigned i;
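   /* Vertex arrays only need re-validation when array, program or
    * buffer-object state is flagged dirty.
    */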
909 GLboolean new_array =
910 st->dirty.st &&
911 (st->dirty.mesa & (_NEW_ARRAY | _NEW_PROGRAM | _NEW_BUFFER_OBJECT)) != 0;
912
913 /* Mesa core state should have been validated already */
914 assert(ctx->NewState == 0x0);
915
916 /* Validate state. */
917 if (st->dirty.st) {
918 GLboolean vertDataEdgeFlags;
919
920 vertDataEdgeFlags = arrays[VERT_ATTRIB_EDGEFLAG]->BufferObj &&
921 arrays[VERT_ATTRIB_EDGEFLAG]->BufferObj->Name;
922 if (vertDataEdgeFlags != st->vertdata_edgeflags) {
923 st->vertdata_edgeflags = vertDataEdgeFlags;
924 st->dirty.st |= ST_NEW_EDGEFLAGS_DATA;
925 }
926
927 st_validate_state(st);
928
929 if (new_array) {
930 if (!st_validate_varrays(ctx, arrays)) {
931 /* probably out of memory, no-op the draw call */
932 return;
933 }
934 }
935
936 #if 0
937 if (MESA_VERBOSE & VERBOSE_GLSL) {
938 check_uniforms(ctx);
939 }
940 #else
941 (void) check_uniforms;
942 #endif
943 }
944
945 util_draw_init_info(&info);
946 if (ib) {
947 /* Get index bounds for user buffers. */
948 if (!index_bounds_valid)
949 if (!all_varyings_in_vbos(arrays))
950 vbo_get_minmax_indices(ctx, prims, ib, &min_index, &max_index,
951 nr_prims);
952
953 setup_index_buffer(st, ib, &ibuffer);
954
955 info.indexed = TRUE;
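      /* a min/max index of ~0 indicates the bounds are unknown */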
956 if (min_index != ~0 && max_index != ~0) {
957 info.min_index = min_index;
958 info.max_index = max_index;
959 }
960
961 /* The VBO module handles restart for the non-indexed glDrawArrays path,
962 * so we only set these fields for indexed drawing:
963 */
964 info.primitive_restart = ctx->Array.PrimitiveRestart;
965 info.restart_index = ctx->Array.RestartIndex;
966 }
967 else {
968 /* Transform feedback drawing is always non-indexed. */
969 /* Set info.count_from_stream_output. */
970 if (tfb_vertcount) {
971 st_transform_feedback_draw_init(tfb_vertcount, &info);
972 }
973 }
974
975 /* do actual drawing */
976 for (i = 0; i < nr_prims; i++) {
977 info.mode = translate_prim( ctx, prims[i].mode );
978 info.start = prims[i].start;
979 info.count = prims[i].count;
980 info.instance_count = prims[i].num_instances;
981 info.index_bias = prims[i].basevertex;
982 if (!ib) {
983 info.min_index = info.start;
984 info.max_index = info.start + info.count - 1;
985 }
986
987 if (info.count_from_stream_output) {
988 cso_draw_vbo(st->cso_context, &info);
989 }
990 else if (info.primitive_restart) {
991 if (st->sw_primitive_restart) {
992 /* Handle primitive restart for drivers that don't support it */
993 handle_fallback_primitive_restart(st->cso_context, pipe, ib,
994 &ibuffer, &info);
995 }
996 else {
997 /* don't trim, restarts might be inside index list */
998 cso_draw_vbo(st->cso_context, &info);
999 }
1000 }
1001 else if (u_trim_pipe_prim(info.mode, &info.count))
1002 cso_draw_vbo(st->cso_context, &info);
1003 }
1004
1005 if (ib && st->indexbuf_uploader && !_mesa_is_bufferobj(ib->obj)) {
1006 pipe_resource_reference(&ibuffer.buffer, NULL);
1007 }
1008 }
1009
1010
1011 void
1012 st_init_draw(struct st_context *st)
1013 {
1014 struct gl_context *ctx = st->ctx;
1015
1016 vbo_set_draw_func(ctx, st_draw_vbo);
1017
1018 #if FEATURE_feedback || FEATURE_rastpos
1019 st->draw = draw_create(st->pipe); /* for selection/feedback */
1020
1021 /* Disable draw options that might convert points/lines to tris, etc.
1022 * as that would foul-up feedback/selection mode.
1023 */
1024 draw_wide_line_threshold(st->draw, 1000.0f);
1025 draw_wide_point_threshold(st->draw, 1000.0f);
1026 draw_enable_line_stipple(st->draw, FALSE);
1027 draw_enable_point_sprites(st->draw, FALSE);
1028 #endif
1029 }
1030
1031
1032 void
1033 st_destroy_draw(struct st_context *st)
1034 {
1035 #if FEATURE_feedback || FEATURE_rastpos
1036 draw_destroy(st->draw);
1037 #endif
1038 }