[mesa.git] / src / mesa / state_tracker / st_draw.c
1 /**************************************************************************
2 *
3 * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28 /*
29 * This file implements the st_draw_vbo() function which is called from
30 * Mesa's VBO module. All point/line/triangle rendering is done through
31 * this function whether the user called glBegin/End, glDrawArrays,
32 * glDrawElements, glEvalMesh, or glCallList, etc.
33 *
34 * We basically convert the VBO's vertex attribute/array information into
35 * Gallium vertex state, bind the vertex buffer objects and call
36 * pipe->draw_vbo().
37 *
38 * Authors:
39 * Keith Whitwell <keith@tungstengraphics.com>
40 */
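/*
 * Rough data-flow sketch (illustrative summary of the code below):
 *
 *   gl_client_array[]  -> pipe_vertex_buffer[] + pipe_vertex_element[]
 *                         (setup_interleaved_attribs() or
 *                          setup_non_interleaved_attribs())
 *   _mesa_index_buffer -> pipe_index_buffer     (setup_index_buffer())
 *   _mesa_prim[]       -> pipe_draw_info        (one pipe->draw_vbo()
 *                                                call per primitive)
 */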
41
42
43 #include "main/imports.h"
44 #include "main/image.h"
45 #include "main/bufferobj.h"
46 #include "main/macros.h"
47 #include "main/mfeatures.h"
48 #include "program/prog_uniform.h"
49
50 #include "vbo/vbo.h"
51
52 #include "st_context.h"
53 #include "st_atom.h"
54 #include "st_cb_bufferobjects.h"
55 #include "st_draw.h"
56 #include "st_program.h"
57
58 #include "pipe/p_context.h"
59 #include "pipe/p_defines.h"
60 #include "util/u_inlines.h"
61 #include "util/u_format.h"
62 #include "util/u_prim.h"
63 #include "util/u_draw_quad.h"
64 #include "draw/draw_context.h"
65 #include "cso_cache/cso_context.h"
66
67
68 static GLuint double_types[4] = {
69 PIPE_FORMAT_R64_FLOAT,
70 PIPE_FORMAT_R64G64_FLOAT,
71 PIPE_FORMAT_R64G64B64_FLOAT,
72 PIPE_FORMAT_R64G64B64A64_FLOAT
73 };
74
75 static GLuint float_types[4] = {
76 PIPE_FORMAT_R32_FLOAT,
77 PIPE_FORMAT_R32G32_FLOAT,
78 PIPE_FORMAT_R32G32B32_FLOAT,
79 PIPE_FORMAT_R32G32B32A32_FLOAT
80 };
81
82 static GLuint half_float_types[4] = {
83 PIPE_FORMAT_R16_FLOAT,
84 PIPE_FORMAT_R16G16_FLOAT,
85 PIPE_FORMAT_R16G16B16_FLOAT,
86 PIPE_FORMAT_R16G16B16A16_FLOAT
87 };
88
89 static GLuint uint_types_norm[4] = {
90 PIPE_FORMAT_R32_UNORM,
91 PIPE_FORMAT_R32G32_UNORM,
92 PIPE_FORMAT_R32G32B32_UNORM,
93 PIPE_FORMAT_R32G32B32A32_UNORM
94 };
95
96 static GLuint uint_types_scale[4] = {
97 PIPE_FORMAT_R32_USCALED,
98 PIPE_FORMAT_R32G32_USCALED,
99 PIPE_FORMAT_R32G32B32_USCALED,
100 PIPE_FORMAT_R32G32B32A32_USCALED
101 };
102
103 static GLuint int_types_norm[4] = {
104 PIPE_FORMAT_R32_SNORM,
105 PIPE_FORMAT_R32G32_SNORM,
106 PIPE_FORMAT_R32G32B32_SNORM,
107 PIPE_FORMAT_R32G32B32A32_SNORM
108 };
109
110 static GLuint int_types_scale[4] = {
111 PIPE_FORMAT_R32_SSCALED,
112 PIPE_FORMAT_R32G32_SSCALED,
113 PIPE_FORMAT_R32G32B32_SSCALED,
114 PIPE_FORMAT_R32G32B32A32_SSCALED
115 };
116
117 static GLuint ushort_types_norm[4] = {
118 PIPE_FORMAT_R16_UNORM,
119 PIPE_FORMAT_R16G16_UNORM,
120 PIPE_FORMAT_R16G16B16_UNORM,
121 PIPE_FORMAT_R16G16B16A16_UNORM
122 };
123
124 static GLuint ushort_types_scale[4] = {
125 PIPE_FORMAT_R16_USCALED,
126 PIPE_FORMAT_R16G16_USCALED,
127 PIPE_FORMAT_R16G16B16_USCALED,
128 PIPE_FORMAT_R16G16B16A16_USCALED
129 };
130
131 static GLuint short_types_norm[4] = {
132 PIPE_FORMAT_R16_SNORM,
133 PIPE_FORMAT_R16G16_SNORM,
134 PIPE_FORMAT_R16G16B16_SNORM,
135 PIPE_FORMAT_R16G16B16A16_SNORM
136 };
137
138 static GLuint short_types_scale[4] = {
139 PIPE_FORMAT_R16_SSCALED,
140 PIPE_FORMAT_R16G16_SSCALED,
141 PIPE_FORMAT_R16G16B16_SSCALED,
142 PIPE_FORMAT_R16G16B16A16_SSCALED
143 };
144
145 static GLuint ubyte_types_norm[4] = {
146 PIPE_FORMAT_R8_UNORM,
147 PIPE_FORMAT_R8G8_UNORM,
148 PIPE_FORMAT_R8G8B8_UNORM,
149 PIPE_FORMAT_R8G8B8A8_UNORM
150 };
151
152 static GLuint ubyte_types_scale[4] = {
153 PIPE_FORMAT_R8_USCALED,
154 PIPE_FORMAT_R8G8_USCALED,
155 PIPE_FORMAT_R8G8B8_USCALED,
156 PIPE_FORMAT_R8G8B8A8_USCALED
157 };
158
159 static GLuint byte_types_norm[4] = {
160 PIPE_FORMAT_R8_SNORM,
161 PIPE_FORMAT_R8G8_SNORM,
162 PIPE_FORMAT_R8G8B8_SNORM,
163 PIPE_FORMAT_R8G8B8A8_SNORM
164 };
165
166 static GLuint byte_types_scale[4] = {
167 PIPE_FORMAT_R8_SSCALED,
168 PIPE_FORMAT_R8G8_SSCALED,
169 PIPE_FORMAT_R8G8B8_SSCALED,
170 PIPE_FORMAT_R8G8B8A8_SSCALED
171 };
172
173 static GLuint fixed_types[4] = {
174 PIPE_FORMAT_R32_FIXED,
175 PIPE_FORMAT_R32G32_FIXED,
176 PIPE_FORMAT_R32G32B32_FIXED,
177 PIPE_FORMAT_R32G32B32A32_FIXED
178 };
179
180
181
182 /**
183 * Return a PIPE_FORMAT_x for the given GL datatype and size.
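*
* Example mappings (illustrative, following the tables above):
*   (GL_FLOAT,         3, GL_RGBA, GL_FALSE) -> PIPE_FORMAT_R32G32B32_FLOAT
*   (GL_UNSIGNED_BYTE, 4, GL_RGBA, GL_TRUE)  -> PIPE_FORMAT_R8G8B8A8_UNORM
*   (GL_UNSIGNED_BYTE, 4, GL_BGRA, GL_TRUE)  -> PIPE_FORMAT_B8G8R8A8_UNORM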
184 */
185 enum pipe_format
186 st_pipe_vertex_format(GLenum type, GLuint size, GLenum format,
187 GLboolean normalized)
188 {
189 assert((type >= GL_BYTE && type <= GL_DOUBLE) ||
190 type == GL_FIXED || type == GL_HALF_FLOAT ||
191 type == GL_INT_2_10_10_10_REV ||
192 type == GL_UNSIGNED_INT_2_10_10_10_REV);
193 assert(size >= 1);
194 assert(size <= 4);
195 assert(format == GL_RGBA || format == GL_BGRA);
196
197 if (type == GL_INT_2_10_10_10_REV ||
198 type == GL_UNSIGNED_INT_2_10_10_10_REV) {
199 assert(size == 4);
200
201 if (format == GL_BGRA) {
202 if (type == GL_INT_2_10_10_10_REV) {
203 if (normalized)
204 return PIPE_FORMAT_B10G10R10A2_SNORM;
205 else
206 return PIPE_FORMAT_B10G10R10A2_SSCALED;
207 } else {
208 if (normalized)
209 return PIPE_FORMAT_B10G10R10A2_UNORM;
210 else
211 return PIPE_FORMAT_B10G10R10A2_USCALED;
212 }
213 } else {
214 if (type == GL_INT_2_10_10_10_REV) {
215 if (normalized)
216 return PIPE_FORMAT_R10G10B10A2_SNORM;
217 else
218 return PIPE_FORMAT_R10G10B10A2_SSCALED;
219 } else {
220 if (normalized)
221 return PIPE_FORMAT_R10G10B10A2_UNORM;
222 else
223 return PIPE_FORMAT_R10G10B10A2_USCALED;
224 }
225 }
226 }
227
228 if (format == GL_BGRA) {
229 /* this is an odd-ball case */
230 assert(type == GL_UNSIGNED_BYTE);
231 assert(normalized);
232 return PIPE_FORMAT_B8G8R8A8_UNORM;
233 }
234
235 if (normalized) {
236 switch (type) {
237 case GL_DOUBLE: return double_types[size-1];
238 case GL_FLOAT: return float_types[size-1];
239 case GL_HALF_FLOAT: return half_float_types[size-1];
240 case GL_INT: return int_types_norm[size-1];
241 case GL_SHORT: return short_types_norm[size-1];
242 case GL_BYTE: return byte_types_norm[size-1];
243 case GL_UNSIGNED_INT: return uint_types_norm[size-1];
244 case GL_UNSIGNED_SHORT: return ushort_types_norm[size-1];
245 case GL_UNSIGNED_BYTE: return ubyte_types_norm[size-1];
246 case GL_FIXED: return fixed_types[size-1];
247 default: assert(0); return 0;
248 }
249 }
250 else {
251 switch (type) {
252 case GL_DOUBLE: return double_types[size-1];
253 case GL_FLOAT: return float_types[size-1];
254 case GL_HALF_FLOAT: return half_float_types[size-1];
255 case GL_INT: return int_types_scale[size-1];
256 case GL_SHORT: return short_types_scale[size-1];
257 case GL_BYTE: return byte_types_scale[size-1];
258 case GL_UNSIGNED_INT: return uint_types_scale[size-1];
259 case GL_UNSIGNED_SHORT: return ushort_types_scale[size-1];
260 case GL_UNSIGNED_BYTE: return ubyte_types_scale[size-1];
261 case GL_FIXED: return fixed_types[size-1];
262 default: assert(0); return 0;
263 }
264 }
265 return PIPE_FORMAT_NONE; /* silence compiler warning */
266 }
267
268
269 /**
270 * This is very similar to vbo_all_varyings_in_vbos() but we are
271 * only interested in per-vertex data. See bug 38626.
272 */
273 static GLboolean
274 all_varyings_in_vbos(const struct gl_client_array *arrays[])
275 {
276 GLuint i;
277
278 for (i = 0; i < VERT_ATTRIB_MAX; i++)
279 if (arrays[i]->StrideB &&
280 !arrays[i]->InstanceDivisor &&
281 !_mesa_is_bufferobj(arrays[i]->BufferObj))
282 return GL_FALSE;
283
284 return GL_TRUE;
285 }
286
287
288 /**
289 * Examine the active arrays to determine if we have interleaved
290 * vertex arrays all living in one VBO, or all living in user space.
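*
* Illustrative example: vertices packed in one VBO as
*   struct { float pos[3]; float normal[3]; GLubyte color[4]; } /* stride 28 */
* count as interleaved: all arrays share the same stride and buffer
* object, and their start pointers lie within one stride of each other.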
291 */
292 static GLboolean
293 is_interleaved_arrays(const struct st_vertex_program *vp,
294 const struct st_vp_variant *vpv,
295 const struct gl_client_array **arrays)
296 {
297 GLuint attr;
298 const struct gl_buffer_object *firstBufObj = NULL;
299 GLint firstStride = -1;
300 const GLubyte *firstPtr = NULL;
301 GLboolean userSpaceBuffer = GL_FALSE;
302
303 for (attr = 0; attr < vpv->num_inputs; attr++) {
304 const GLuint mesaAttr = vp->index_to_input[attr];
305 const struct gl_client_array *array = arrays[mesaAttr];
306 const struct gl_buffer_object *bufObj = array->BufferObj;
307 const GLsizei stride = array->StrideB; /* in bytes */
308
309 if (attr == 0) {
310 /* save info about the first array */
311 firstStride = stride;
312 firstPtr = array->Ptr;
313 firstBufObj = bufObj;
314 userSpaceBuffer = !bufObj || !bufObj->Name;
315 }
316 else {
317 /* check if other arrays interleave with the first, in same buffer */
318 if (stride != firstStride)
319 return GL_FALSE; /* strides don't match */
320
321 if (bufObj != firstBufObj)
322 return GL_FALSE; /* arrays in different VBOs */
323
324 if (abs(array->Ptr - firstPtr) > firstStride)
325 return GL_FALSE; /* arrays start too far apart */
326
327 if ((!bufObj || !_mesa_is_bufferobj(bufObj)) != userSpaceBuffer)
328 return GL_FALSE; /* mix of VBO and user-space arrays */
329 }
330 }
331
332 return GL_TRUE;
333 }
334
335
336 /**
337 * Set up for drawing interleaved arrays that all live in one VBO
338 * or all live in user space.
339 * \param vbuffer returns vertex buffer info
340 * \param velements returns vertex element info
341 * \return GL_TRUE for success, GL_FALSE otherwise (probably out of memory)
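*
* For the pos/normal/color struct example above (illustrative), this
* yields a single vbuffer with stride 28 and velements[] src_offsets of
* 0, 12 and 24, all with vertex_buffer_index 0.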
342 */
343 static GLboolean
344 setup_interleaved_attribs(struct gl_context *ctx,
345 const struct st_vertex_program *vp,
346 const struct st_vp_variant *vpv,
347 const struct gl_client_array **arrays,
348 struct pipe_vertex_buffer *vbuffer,
349 struct pipe_vertex_element velements[],
350 unsigned max_index,
351 unsigned num_instances)
352 {
353 struct st_context *st = st_context(ctx);
354 struct pipe_context *pipe = st->pipe;
355 GLuint attr;
356 const GLubyte *low_addr = NULL;
357 GLboolean usingVBO; /* all arrays in a VBO? */
358 struct gl_buffer_object *bufobj;
359 GLuint user_buffer_size = 0;
360 GLuint vertex_size = 0; /* bytes per vertex */
361 GLsizei stride;
362
363 /* Find the lowest address of the arrays we're drawing,
364 * and init bufobj and stride.
365 */
366 if (vpv->num_inputs) {
367 const GLuint mesaAttr0 = vp->index_to_input[0];
368 const struct gl_client_array *array = arrays[mesaAttr0];
369
370 /* Since we're doing interleaved arrays, we know there'll be at most
371 * one buffer object and the stride will be the same for all arrays.
372 * Grab them now.
373 */
374 bufobj = array->BufferObj;
375 stride = array->StrideB;
376
377 low_addr = arrays[vp->index_to_input[0]]->Ptr;
378
379 for (attr = 1; attr < vpv->num_inputs; attr++) {
380 const GLubyte *start = arrays[vp->index_to_input[attr]]->Ptr;
381 low_addr = MIN2(low_addr, start);
382 }
383 }
384 else {
385 /* not sure we'll ever have zero inputs, but play it safe */
386 bufobj = NULL;
387 stride = 0;
388 low_addr = 0;
389 }
390
391 /* are all the arrays in a VBO (as opposed to user space)? */
392 usingVBO = bufobj && _mesa_is_bufferobj(bufobj);
393
394 for (attr = 0; attr < vpv->num_inputs; attr++) {
395 const GLuint mesaAttr = vp->index_to_input[attr];
396 const struct gl_client_array *array = arrays[mesaAttr];
397 unsigned src_offset = (unsigned) (array->Ptr - low_addr);
398 GLuint element_size = array->_ElementSize;
399
400 assert(element_size == array->Size * _mesa_sizeof_type(array->Type));
401
402 velements[attr].src_offset = src_offset;
403 velements[attr].instance_divisor = array->InstanceDivisor;
404 velements[attr].vertex_buffer_index = 0;
405 velements[attr].src_format = st_pipe_vertex_format(array->Type,
406 array->Size,
407 array->Format,
408 array->Normalized);
409 assert(velements[attr].src_format);
410
411 if (!usingVBO) {
412 /* how many bytes referenced by this attribute array? */
413 uint divisor = array->InstanceDivisor;
414 uint last_index = divisor ? num_instances / divisor : max_index;
415 uint bytes = src_offset + stride * last_index + element_size;
416
417 user_buffer_size = MAX2(user_buffer_size, bytes);
418
419 /* update vertex size */
420 vertex_size = MAX2(vertex_size, src_offset + element_size);
421 }
422 }
423
424 /*
425 * Return the vbuffer info and set up user-space attrib info, if needed.
426 */
427 if (vpv->num_inputs == 0) {
428 /* just defensive coding here */
429 vbuffer->buffer = NULL;
430 vbuffer->buffer_offset = 0;
431 vbuffer->stride = 0;
432 st->num_user_attribs = 0;
433 }
434 else if (usingVBO) {
435 /* all interleaved arrays in a VBO */
436 struct st_buffer_object *stobj = st_buffer_object(bufobj);
437
438 if (!stobj) {
439 /* probably out of memory */
440 return GL_FALSE;
441 }
442
443 vbuffer->buffer = NULL;
444 pipe_resource_reference(&vbuffer->buffer, stobj->buffer);
445 vbuffer->buffer_offset = pointer_to_offset(low_addr);
446 vbuffer->stride = stride;
447 st->num_user_attribs = 0;
448 }
449 else {
450 /* all interleaved arrays in user memory */
451 vbuffer->buffer = pipe_user_buffer_create(pipe->screen,
452 (void*) low_addr,
453 user_buffer_size,
454 PIPE_BIND_VERTEX_BUFFER);
455 vbuffer->buffer_offset = 0;
456 vbuffer->stride = stride;
457
458 /* Track user vertex buffers. */
459 pipe_resource_reference(&st->user_attrib[0].buffer, vbuffer->buffer);
460 st->user_attrib[0].element_size = vertex_size;
461 st->user_attrib[0].stride = stride;
462 st->num_user_attribs = 1;
463 }
464
465 return GL_TRUE;
466 }
467
468
469 /**
470 * Set up a separate pipe_vertex_buffer and pipe_vertex_element for each
471 * vertex attribute.
472 * \param vbuffer returns vertex buffer info
473 * \param velements returns vertex element info
474 * \return GL_TRUE for success, GL_FALSE otherwise (probably out of memory)
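*
* Illustrative contrast with the interleaved path: each enabled attribute
* gets its own vbuffer[attr] (with its own stride), and velements[attr]
* uses vertex_buffer_index == attr with src_offset == 0.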
475 */
476 static GLboolean
477 setup_non_interleaved_attribs(struct gl_context *ctx,
478 const struct st_vertex_program *vp,
479 const struct st_vp_variant *vpv,
480 const struct gl_client_array **arrays,
481 struct pipe_vertex_buffer vbuffer[],
482 struct pipe_vertex_element velements[],
483 unsigned max_index,
484 unsigned num_instances)
485 {
486 struct st_context *st = st_context(ctx);
487 struct pipe_context *pipe = st->pipe;
488 GLuint attr;
489
490 for (attr = 0; attr < vpv->num_inputs; attr++) {
491 const GLuint mesaAttr = vp->index_to_input[attr];
492 const struct gl_client_array *array = arrays[mesaAttr];
493 struct gl_buffer_object *bufobj = array->BufferObj;
494 GLuint element_size = array->_ElementSize;
495 GLsizei stride = array->StrideB;
496
497 assert(element_size == array->Size * _mesa_sizeof_type(array->Type));
498
499 if (bufobj && _mesa_is_bufferobj(bufobj)) {
500 /* Attribute data is in a VBO.
501 * Recall that for VBOs, the gl_client_array->Ptr field is
502 * really an offset from the start of the VBO, not a pointer.
503 */
504 struct st_buffer_object *stobj = st_buffer_object(bufobj);
505
506 if (!stobj || !stobj->buffer) {
507 /* probably ran out of memory */
508 return GL_FALSE;
509 }
510
511 vbuffer[attr].buffer = NULL;
512 pipe_resource_reference(&vbuffer[attr].buffer, stobj->buffer);
513 vbuffer[attr].buffer_offset = pointer_to_offset(array->Ptr);
514 }
515 else {
516 /* wrap user data */
517 uint bytes;
518 void *ptr;
519
520 if (array->Ptr) {
521 uint divisor = array->InstanceDivisor;
522 uint last_index = divisor ? num_instances / divisor : max_index;
523
524 bytes = stride * last_index + element_size;
525
526 ptr = (void *) array->Ptr;
527 }
528 else {
529 /* no array, use ctx->Current.Attrib[] value */
530 bytes = element_size = sizeof(ctx->Current.Attrib[0]);
531 ptr = (void *) ctx->Current.Attrib[mesaAttr];
532 stride = 0;
533 }
534
535 assert(ptr);
536 assert(bytes);
537
538 vbuffer[attr].buffer =
539 pipe_user_buffer_create(pipe->screen, ptr, bytes,
540 PIPE_BIND_VERTEX_BUFFER);
541
542 vbuffer[attr].buffer_offset = 0;
543
544 /* Track user vertex buffers. */
545 pipe_resource_reference(&st->user_attrib[attr].buffer, vbuffer[attr].buffer);
546 st->user_attrib[attr].element_size = element_size;
547 st->user_attrib[attr].stride = stride;
548 st->num_user_attribs = MAX2(st->num_user_attribs, attr + 1);
549
550 if (!vbuffer[attr].buffer) {
551 /* probably ran out of memory */
552 return GL_FALSE;
553 }
554 }
555
556 /* common-case setup */
557 vbuffer[attr].stride = stride; /* in bytes */
558
559 velements[attr].src_offset = 0;
560 velements[attr].instance_divisor = array->InstanceDivisor;
561 velements[attr].vertex_buffer_index = attr;
562 velements[attr].src_format = st_pipe_vertex_format(array->Type,
563 array->Size,
564 array->Format,
565 array->Normalized);
566 assert(velements[attr].src_format);
567 }
568
569 return GL_TRUE;
570 }
571
572
573 static void
574 setup_index_buffer(struct gl_context *ctx,
575 const struct _mesa_index_buffer *ib,
576 struct pipe_index_buffer *ibuffer)
577 {
578 struct st_context *st = st_context(ctx);
579 struct pipe_context *pipe = st->pipe;
580
581 memset(ibuffer, 0, sizeof(*ibuffer));
582 if (ib) {
583 struct gl_buffer_object *bufobj = ib->obj;
584
585 switch (ib->type) {
586 case GL_UNSIGNED_INT:
587 ibuffer->index_size = 4;
588 break;
589 case GL_UNSIGNED_SHORT:
590 ibuffer->index_size = 2;
591 break;
592 case GL_UNSIGNED_BYTE:
593 ibuffer->index_size = 1;
594 break;
595 default:
596 assert(0);
597 return;
598 }
599
600 /* get/create the index buffer object */
601 if (bufobj && _mesa_is_bufferobj(bufobj)) {
602 /* elements/indexes are in a real VBO */
603 struct st_buffer_object *stobj = st_buffer_object(bufobj);
604 pipe_resource_reference(&ibuffer->buffer, stobj->buffer);
605 ibuffer->offset = pointer_to_offset(ib->ptr);
606 }
607 else {
608 /* elements/indices are in user space memory */
609 ibuffer->buffer =
610 pipe_user_buffer_create(pipe->screen, (void *) ib->ptr,
611 ib->count * ibuffer->index_size,
612 PIPE_BIND_INDEX_BUFFER);
613 }
614 }
615 }
616
617
618 /**
619 * Prior to drawing, check that any uniforms referenced by the
620 * current shader have been set. If a uniform has not been set,
621 * issue a warning.
622 */
623 static void
624 check_uniforms(struct gl_context *ctx)
625 {
626 struct gl_shader_program *shProg[3] = {
627 ctx->Shader.CurrentVertexProgram,
628 ctx->Shader.CurrentGeometryProgram,
629 ctx->Shader.CurrentFragmentProgram,
630 };
631 unsigned j;
632
633 for (j = 0; j < 3; j++) {
634 unsigned i;
635
636 if (shProg[j] == NULL || !shProg[j]->LinkStatus)
637 continue;
638
639 for (i = 0; i < shProg[j]->Uniforms->NumUniforms; i++) {
640 const struct gl_uniform *u = &shProg[j]->Uniforms->Uniforms[i];
641 if (!u->Initialized) {
642 _mesa_warning(ctx,
643 "Using shader with uninitialized uniform: %s",
644 u->Name);
645 }
646 }
647 }
648 }
649
650 /** Helper code for primitive restart fallback */
651 #define DO_DRAW(pipe, cur_start, cur_count) \
652 do { \
653 info.start = cur_start; \
654 info.count = cur_count; \
655 if (u_trim_pipe_prim(info.mode, &info.count)) { \
656 if (transfer) \
657 pipe_buffer_unmap(pipe, transfer); \
658 pipe->draw_vbo(pipe, &info); \
659 if (transfer) { \
660 ptr = pipe_buffer_map(pipe, ibuffer->buffer, PIPE_TRANSFER_READ, &transfer); \
661 assert(ptr != NULL); \
662 ptr = ADD_POINTERS(ptr, ibuffer->offset); \
663 } \
664 } \
665 } while(0)
666
667 /** More helper code for primitive restart fallback */
668 #define PRIM_RESTART_LOOP(elements) \
669 do { \
670 for (i = start; i < end; i++) { \
671 if (elements[i] == info.restart_index) { \
672 if (cur_count > 0) { \
673 /* draw elts up to prev pos */ \
674 DO_DRAW(pipe, cur_start, cur_count); \
675 } \
676 /* begin new prim at next elt */ \
677 cur_start = i + 1; \
678 cur_count = 0; \
679 } \
680 else { \
681 cur_count++; \
682 } \
683 } \
684 if (cur_count > 0) { \
685 DO_DRAW(pipe, cur_start, cur_count); \
686 } \
687 } while (0)
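/*
 * Illustrative trace: with info.restart_index == 0xffff and index data
 * {0, 1, 2, 0xffff, 3, 4, 5} (start == 0, count == 7), the macro above
 * issues DO_DRAW(pipe, 0, 3) for the first three indices, skips the
 * restart index, then issues DO_DRAW(pipe, 4, 3) for the rest.
 */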
688
689 static void
690 handle_fallback_primitive_restart(struct pipe_context *pipe,
691 const struct _mesa_index_buffer *ib,
692 struct pipe_index_buffer *ibuffer,
693 struct pipe_draw_info *orig_info)
694 {
695 const unsigned start = orig_info->start;
696 const unsigned count = orig_info->count;
697 const unsigned end = start + count;
698 struct pipe_draw_info info = *orig_info;
699 struct pipe_transfer *transfer = NULL;
700 unsigned instance, i, cur_start, cur_count;
701 const void *ptr;
702
703 info.primitive_restart = FALSE;
704
705 if (!info.indexed) {
706 /* Splitting the draw arrays call is handled by the VBO module */
707 if (u_trim_pipe_prim(info.mode, &info.count))
708 pipe->draw_vbo(pipe, &info);
709
710 return;
711 }
712
713 /* info.indexed == TRUE */
714 assert(ibuffer);
715 assert(ibuffer->buffer);
716
717 if (ib) {
718 struct gl_buffer_object *bufobj = ib->obj;
719 if (bufobj && bufobj->Name) {
720 ptr = NULL;
721 }
722 else {
723 ptr = ib->ptr;
724 }
725 } else {
726 ptr = NULL;
727 }
728
729 if (!ptr)
730 ptr = pipe_buffer_map(pipe, ibuffer->buffer, PIPE_TRANSFER_READ, &transfer);
731
732 if (!ptr)
733 return;
734 ptr = ADD_POINTERS(ptr, ibuffer->offset);
735
736 /* Need to loop over instances as well to preserve draw order */
737 for (instance = 0; instance < orig_info->instance_count; instance++) {
738 info.start_instance = instance + orig_info->start_instance;
739 info.instance_count = 1;
740 cur_start = start;
741 cur_count = 0;
742
743 switch (ibuffer->index_size) {
744 case 1:
745 {
746 const ubyte *elt_ub = (const ubyte *)ptr;
747 PRIM_RESTART_LOOP(elt_ub);
748 }
749 break;
750 case 2:
751 {
752 const ushort *elt_us = (const ushort *)ptr;
753 PRIM_RESTART_LOOP(elt_us);
754 }
755 break;
756 case 4:
757 {
758 const uint *elt_ui = (const uint *)ptr;
759 PRIM_RESTART_LOOP(elt_ui);
760 }
761 break;
762 default:
763 assert(0 && "bad index_size in handle_fallback_primitive_restart()");
764 }
765 }
766
767 if (transfer)
768 pipe_buffer_unmap(pipe, transfer);
769 }
770
771
772 /**
773 * Translate OpenGL primitive type (GL_POINTS, GL_TRIANGLE_STRIP, etc) to
774 * the corresponding Gallium type.
775 */
776 static unsigned
777 translate_prim(const struct gl_context *ctx, unsigned prim)
778 {
779 /* GL prims should match Gallium prims, spot-check a few */
780 assert(GL_POINTS == PIPE_PRIM_POINTS);
781 assert(GL_QUADS == PIPE_PRIM_QUADS);
782 assert(GL_TRIANGLE_STRIP_ADJACENCY == PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY);
783
784 /* Avoid quadstrips if it's easy to do so:
785 * Note: it's important to do the correct trimming if we change the
786 * prim type! We do that wherever this function is called.
787 */
788 if (prim == GL_QUAD_STRIP &&
789 ctx->Light.ShadeModel != GL_FLAT &&
790 ctx->Polygon.FrontMode == GL_FILL &&
791 ctx->Polygon.BackMode == GL_FILL)
792 prim = GL_TRIANGLE_STRIP;
793
794 return prim;
795 }
796
797
798 /**
799 * Set up vertex arrays and buffers prior to drawing.
800 * \return GL_TRUE for success, GL_FALSE otherwise (probably out of memory)
801 */
802 static GLboolean
803 st_validate_varrays(struct gl_context *ctx,
804 const struct gl_client_array **arrays,
805 unsigned max_index,
806 unsigned num_instances)
807 {
808 struct st_context *st = st_context(ctx);
809 const struct st_vertex_program *vp;
810 const struct st_vp_variant *vpv;
811 struct pipe_vertex_buffer vbuffer[PIPE_MAX_SHADER_INPUTS];
812 struct pipe_vertex_element velements[PIPE_MAX_ATTRIBS];
813 unsigned num_vbuffers, num_velements;
814 GLuint attr;
815 unsigned i;
816
817 /* must get these after state validation! */
818 vp = st->vp;
819 vpv = st->vp_variant;
820
821 memset(velements, 0, sizeof(struct pipe_vertex_element) * vpv->num_inputs);
822
823 /* Unreference any user vertex buffers. */
824 for (i = 0; i < st->num_user_attribs; i++) {
825 pipe_resource_reference(&st->user_attrib[i].buffer, NULL);
826 }
827 st->num_user_attribs = 0;
828
829 /*
830 * Setup the vbuffer[] and velements[] arrays.
831 */
832 if (is_interleaved_arrays(vp, vpv, arrays)) {
833 if (!setup_interleaved_attribs(ctx, vp, vpv, arrays, vbuffer, velements,
834 max_index, num_instances)) {
835 return GL_FALSE;
836 }
837
838 num_vbuffers = 1;
839 num_velements = vpv->num_inputs;
840 if (num_velements == 0)
841 num_vbuffers = 0;
842 }
843 else {
844 if (!setup_non_interleaved_attribs(ctx, vp, vpv, arrays,
845 vbuffer, velements, max_index,
846 num_instances)) {
847 return GL_FALSE;
848 }
849
850 num_vbuffers = vpv->num_inputs;
851 num_velements = vpv->num_inputs;
852 }
853
854 cso_set_vertex_buffers(st->cso_context, num_vbuffers, vbuffer);
855 cso_set_vertex_elements(st->cso_context, num_velements, velements);
856
857 /* Unreference buffers (frees wrapped user-space buffer objects).
858 * This is OK because the pipe driver should take its own references
859 * in set_vertex_buffers. */
860 for (attr = 0; attr < num_vbuffers; attr++) {
861 pipe_resource_reference(&vbuffer[attr].buffer, NULL);
862 assert(!vbuffer[attr].buffer);
863 }
864
865 return GL_TRUE;
866 }
867
868
869 /**
870 * This function gets plugged into the VBO module and is called when
871 * we have something to render.
872 * Basically, translate the information into the format expected by gallium.
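*
* Roughly (as implemented below): compute min/max index and instance
* count, validate state and vertex arrays, set up the index buffer,
* then fill a pipe_draw_info and call pipe->draw_vbo() for each
* _mesa_prim, falling back to a software loop for primitive restart
* when needed.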
873 */
874 void
875 st_draw_vbo(struct gl_context *ctx,
876 const struct gl_client_array **arrays,
877 const struct _mesa_prim *prims,
878 GLuint nr_prims,
879 const struct _mesa_index_buffer *ib,
880 GLboolean index_bounds_valid,
881 GLuint min_index,
882 GLuint max_index)
883 {
884 struct st_context *st = st_context(ctx);
885 struct pipe_context *pipe = st->pipe;
886 struct pipe_index_buffer ibuffer;
887 struct pipe_draw_info info;
888 unsigned i, num_instances = 1;
889 GLboolean new_array =
890 st->dirty.st &&
891 (st->dirty.mesa & (_NEW_ARRAY | _NEW_PROGRAM | _NEW_BUFFER_OBJECT)) != 0;
892
893 /* Mesa core state should have been validated already */
894 assert(ctx->NewState == 0x0);
895
896 if (ib) {
897 /* Gallium probably doesn't want this in some cases. */
898 if (!index_bounds_valid)
899 if (!all_varyings_in_vbos(arrays))
900 vbo_get_minmax_index(ctx, prims, ib, &min_index, &max_index);
901
902 for (i = 0; i < nr_prims; i++) {
903 num_instances = MAX2(num_instances, prims[i].num_instances);
904 }
905 }
906 else {
907 /* Get min/max index for non-indexed drawing. */
908 min_index = ~0;
909 max_index = 0;
910
911 for (i = 0; i < nr_prims; i++) {
912 min_index = MIN2(min_index, prims[i].start);
913 max_index = MAX2(max_index, prims[i].start + prims[i].count - 1);
914 num_instances = MAX2(num_instances, prims[i].num_instances);
915 }
916 }
917
918 /* Validate state. */
919 if (st->dirty.st) {
920 GLboolean vertDataEdgeFlags;
921
922 /* sanity check for pointer arithmetic below */
923 assert(sizeof(arrays[0]->Ptr[0]) == 1);
924
925 vertDataEdgeFlags = arrays[VERT_ATTRIB_EDGEFLAG]->BufferObj &&
926 arrays[VERT_ATTRIB_EDGEFLAG]->BufferObj->Name;
927 if (vertDataEdgeFlags != st->vertdata_edgeflags) {
928 st->vertdata_edgeflags = vertDataEdgeFlags;
929 st->dirty.st |= ST_NEW_EDGEFLAGS_DATA;
930 }
931
932 st_validate_state(st);
933
934 if (new_array) {
935 if (!st_validate_varrays(ctx, arrays, max_index, num_instances)) {
936 /* probably out of memory, no-op the draw call */
937 return;
938 }
939 }
940
941 #if 0
942 if (MESA_VERBOSE & VERBOSE_GLSL) {
943 check_uniforms(ctx);
944 }
945 #else
946 (void) check_uniforms;
947 #endif
948 }
949
950 /* Notify the driver that the content of user buffers may have been
951 * changed. */
952 assert(max_index >= min_index);
953 if (!new_array && st->num_user_attribs) {
954 for (i = 0; i < st->num_user_attribs; i++) {
955 if (st->user_attrib[i].buffer) {
956 unsigned element_size = st->user_attrib[i].element_size;
957 unsigned stride = st->user_attrib[i].stride;
958 unsigned min_offset = min_index * stride;
959 unsigned max_offset = max_index * stride + element_size;
960
961 assert(max_offset > min_offset);
962
963 pipe->redefine_user_buffer(pipe, st->user_attrib[i].buffer,
964 min_offset,
965 max_offset - min_offset);
966 }
967 }
968 }
969
970 setup_index_buffer(ctx, ib, &ibuffer);
971 pipe->set_index_buffer(pipe, &ibuffer);
972
973 util_draw_init_info(&info);
974 if (ib) {
975 info.indexed = TRUE;
976 if (min_index != ~0 && max_index != ~0) {
977 info.min_index = min_index;
978 info.max_index = max_index;
979 }
980 }
981
982 info.primitive_restart = ctx->Array.PrimitiveRestart;
983 info.restart_index = ctx->Array.RestartIndex;
984
985 /* do actual drawing */
986 for (i = 0; i < nr_prims; i++) {
987 info.mode = translate_prim( ctx, prims[i].mode );
988 info.start = prims[i].start;
989 info.count = prims[i].count;
990 info.instance_count = prims[i].num_instances;
991 info.index_bias = prims[i].basevertex;
992 if (!ib) {
993 info.min_index = info.start;
994 info.max_index = info.start + info.count - 1;
995 }
996
997 if (info.primitive_restart) {
998 /*
999 * Handle primitive restart for drivers that don't support it.
1000 *
1001 * The VBO module handles restart inside of draw_arrays for us,
1002 * but the primitive_restart flag still needs to be cleared on the
1003 * info struct; the fallback function does that for us, so use it
1004 * for all drivers in the non-indexed case as well.
1005 */
1006 if (st->sw_primitive_restart || !info.indexed)
1007 handle_fallback_primitive_restart(pipe, ib, &ibuffer, &info);
1008 else
1009 /* don't trim, restarts might be inside index list */
1010 pipe->draw_vbo(pipe, &info);
1011 }
1012 else if (u_trim_pipe_prim(info.mode, &info.count))
1013 pipe->draw_vbo(pipe, &info);
1014 }
1015
1016 pipe_resource_reference(&ibuffer.buffer, NULL);
1017 }
1018
1019
1020 void
1021 st_init_draw(struct st_context *st)
1022 {
1023 struct gl_context *ctx = st->ctx;
1024
1025 vbo_set_draw_func(ctx, st_draw_vbo);
1026
1027 #if FEATURE_feedback || FEATURE_rastpos
1028 st->draw = draw_create(st->pipe); /* for selection/feedback */
1029
1030 /* Disable draw options that might convert points/lines to tris, etc.
1031 * as that would foul up feedback/selection mode.
1032 */
1033 draw_wide_line_threshold(st->draw, 1000.0f);
1034 draw_wide_point_threshold(st->draw, 1000.0f);
1035 draw_enable_line_stipple(st->draw, FALSE);
1036 draw_enable_point_sprites(st->draw, FALSE);
1037 #endif
1038 }
1039
1040
1041 void
1042 st_destroy_draw(struct st_context *st)
1043 {
1044 #if FEATURE_feedback || FEATURE_rastpos
1045 draw_destroy(st->draw);
1046 #endif
1047 }