2 /**************************************************************************
4 * Copyright 2007 VMware, Inc.
5 * Copyright 2012 Marek Olšák <maraeo@gmail.com>
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the
10 * "Software"), to deal in the Software without restriction, including
11 * without limitation the rights to use, copy, modify, merge, publish,
12 * distribute, sub license, and/or sell copies of the Software, and to
13 * permit persons to whom the Software is furnished to do so, subject to
14 * the following conditions:
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
20 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
21 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
22 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
23 * IN NO EVENT SHALL AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR
24 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
25 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
26 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
28 **************************************************************************/
31 * This converts the VBO's vertex attribute/array information into
32 * Gallium vertex state and binds it.
35 * Keith Whitwell <keithw@vmware.com>
36 * Marek Olšák <maraeo@gmail.com>
39 #include "st_context.h"
41 #include "st_cb_bufferobjects.h"
43 #include "st_program.h"
45 #include "cso_cache/cso_context.h"
46 #include "util/u_math.h"
47 #include "util/u_upload_mgr.h"
48 #include "main/bufferobj.h"
49 #include "main/glformats.h"
50 #include "main/varray.h"
52 /* vertex_formats[gltype - GL_BYTE][integer*2 + normalized][size - 1] */
53 static const uint16_t vertex_formats
[][4][4] = {
56 PIPE_FORMAT_R8_SSCALED
,
57 PIPE_FORMAT_R8G8_SSCALED
,
58 PIPE_FORMAT_R8G8B8_SSCALED
,
59 PIPE_FORMAT_R8G8B8A8_SSCALED
63 PIPE_FORMAT_R8G8_SNORM
,
64 PIPE_FORMAT_R8G8B8_SNORM
,
65 PIPE_FORMAT_R8G8B8A8_SNORM
69 PIPE_FORMAT_R8G8_SINT
,
70 PIPE_FORMAT_R8G8B8_SINT
,
71 PIPE_FORMAT_R8G8B8A8_SINT
74 { /* GL_UNSIGNED_BYTE */
76 PIPE_FORMAT_R8_USCALED
,
77 PIPE_FORMAT_R8G8_USCALED
,
78 PIPE_FORMAT_R8G8B8_USCALED
,
79 PIPE_FORMAT_R8G8B8A8_USCALED
83 PIPE_FORMAT_R8G8_UNORM
,
84 PIPE_FORMAT_R8G8B8_UNORM
,
85 PIPE_FORMAT_R8G8B8A8_UNORM
89 PIPE_FORMAT_R8G8_UINT
,
90 PIPE_FORMAT_R8G8B8_UINT
,
91 PIPE_FORMAT_R8G8B8A8_UINT
96 PIPE_FORMAT_R16_SSCALED
,
97 PIPE_FORMAT_R16G16_SSCALED
,
98 PIPE_FORMAT_R16G16B16_SSCALED
,
99 PIPE_FORMAT_R16G16B16A16_SSCALED
102 PIPE_FORMAT_R16_SNORM
,
103 PIPE_FORMAT_R16G16_SNORM
,
104 PIPE_FORMAT_R16G16B16_SNORM
,
105 PIPE_FORMAT_R16G16B16A16_SNORM
108 PIPE_FORMAT_R16_SINT
,
109 PIPE_FORMAT_R16G16_SINT
,
110 PIPE_FORMAT_R16G16B16_SINT
,
111 PIPE_FORMAT_R16G16B16A16_SINT
114 { /* GL_UNSIGNED_SHORT */
116 PIPE_FORMAT_R16_USCALED
,
117 PIPE_FORMAT_R16G16_USCALED
,
118 PIPE_FORMAT_R16G16B16_USCALED
,
119 PIPE_FORMAT_R16G16B16A16_USCALED
122 PIPE_FORMAT_R16_UNORM
,
123 PIPE_FORMAT_R16G16_UNORM
,
124 PIPE_FORMAT_R16G16B16_UNORM
,
125 PIPE_FORMAT_R16G16B16A16_UNORM
128 PIPE_FORMAT_R16_UINT
,
129 PIPE_FORMAT_R16G16_UINT
,
130 PIPE_FORMAT_R16G16B16_UINT
,
131 PIPE_FORMAT_R16G16B16A16_UINT
136 PIPE_FORMAT_R32_SSCALED
,
137 PIPE_FORMAT_R32G32_SSCALED
,
138 PIPE_FORMAT_R32G32B32_SSCALED
,
139 PIPE_FORMAT_R32G32B32A32_SSCALED
142 PIPE_FORMAT_R32_SNORM
,
143 PIPE_FORMAT_R32G32_SNORM
,
144 PIPE_FORMAT_R32G32B32_SNORM
,
145 PIPE_FORMAT_R32G32B32A32_SNORM
148 PIPE_FORMAT_R32_SINT
,
149 PIPE_FORMAT_R32G32_SINT
,
150 PIPE_FORMAT_R32G32B32_SINT
,
151 PIPE_FORMAT_R32G32B32A32_SINT
154 { /* GL_UNSIGNED_INT */
156 PIPE_FORMAT_R32_USCALED
,
157 PIPE_FORMAT_R32G32_USCALED
,
158 PIPE_FORMAT_R32G32B32_USCALED
,
159 PIPE_FORMAT_R32G32B32A32_USCALED
162 PIPE_FORMAT_R32_UNORM
,
163 PIPE_FORMAT_R32G32_UNORM
,
164 PIPE_FORMAT_R32G32B32_UNORM
,
165 PIPE_FORMAT_R32G32B32A32_UNORM
168 PIPE_FORMAT_R32_UINT
,
169 PIPE_FORMAT_R32G32_UINT
,
170 PIPE_FORMAT_R32G32B32_UINT
,
171 PIPE_FORMAT_R32G32B32A32_UINT
176 PIPE_FORMAT_R32_FLOAT
,
177 PIPE_FORMAT_R32G32_FLOAT
,
178 PIPE_FORMAT_R32G32B32_FLOAT
,
179 PIPE_FORMAT_R32G32B32A32_FLOAT
182 PIPE_FORMAT_R32_FLOAT
,
183 PIPE_FORMAT_R32G32_FLOAT
,
184 PIPE_FORMAT_R32G32B32_FLOAT
,
185 PIPE_FORMAT_R32G32B32A32_FLOAT
188 {{0}}, /* GL_2_BYTES */
189 {{0}}, /* GL_3_BYTES */
190 {{0}}, /* GL_4_BYTES */
193 PIPE_FORMAT_R64_FLOAT
,
194 PIPE_FORMAT_R64G64_FLOAT
,
195 PIPE_FORMAT_R64G64B64_FLOAT
,
196 PIPE_FORMAT_R64G64B64A64_FLOAT
199 PIPE_FORMAT_R64_FLOAT
,
200 PIPE_FORMAT_R64G64_FLOAT
,
201 PIPE_FORMAT_R64G64B64_FLOAT
,
202 PIPE_FORMAT_R64G64B64A64_FLOAT
205 { /* GL_HALF_FLOAT */
207 PIPE_FORMAT_R16_FLOAT
,
208 PIPE_FORMAT_R16G16_FLOAT
,
209 PIPE_FORMAT_R16G16B16_FLOAT
,
210 PIPE_FORMAT_R16G16B16A16_FLOAT
213 PIPE_FORMAT_R16_FLOAT
,
214 PIPE_FORMAT_R16G16_FLOAT
,
215 PIPE_FORMAT_R16G16B16_FLOAT
,
216 PIPE_FORMAT_R16G16B16A16_FLOAT
221 PIPE_FORMAT_R32_FIXED
,
222 PIPE_FORMAT_R32G32_FIXED
,
223 PIPE_FORMAT_R32G32B32_FIXED
,
224 PIPE_FORMAT_R32G32B32A32_FIXED
227 PIPE_FORMAT_R32_FIXED
,
228 PIPE_FORMAT_R32G32_FIXED
,
229 PIPE_FORMAT_R32G32B32_FIXED
,
230 PIPE_FORMAT_R32G32B32A32_FIXED
237 * Return a PIPE_FORMAT_x for the given GL datatype and size.
240 st_pipe_vertex_format(const struct gl_array_attributes
*attrib
)
242 const GLubyte size
= attrib
->Size
;
243 const GLenum16 format
= attrib
->Format
;
244 const bool normalized
= attrib
->Normalized
;
245 const bool integer
= attrib
->Integer
;
246 GLenum16 type
= attrib
->Type
;
249 assert(size
>= 1 && size
<= 4);
250 assert(format
== GL_RGBA
|| format
== GL_BGRA
);
251 assert(attrib
->_ElementSize
== _mesa_bytes_per_vertex_attrib(size
, type
));
254 case GL_HALF_FLOAT_OES
:
255 type
= GL_HALF_FLOAT
;
258 case GL_INT_2_10_10_10_REV
:
259 assert(size
== 4 && !integer
);
261 if (format
== GL_BGRA
) {
263 return PIPE_FORMAT_B10G10R10A2_SNORM
;
265 return PIPE_FORMAT_B10G10R10A2_SSCALED
;
268 return PIPE_FORMAT_R10G10B10A2_SNORM
;
270 return PIPE_FORMAT_R10G10B10A2_SSCALED
;
274 case GL_UNSIGNED_INT_2_10_10_10_REV
:
275 assert(size
== 4 && !integer
);
277 if (format
== GL_BGRA
) {
279 return PIPE_FORMAT_B10G10R10A2_UNORM
;
281 return PIPE_FORMAT_B10G10R10A2_USCALED
;
284 return PIPE_FORMAT_R10G10B10A2_UNORM
;
286 return PIPE_FORMAT_R10G10B10A2_USCALED
;
290 case GL_UNSIGNED_INT_10F_11F_11F_REV
:
291 assert(size
== 3 && !integer
&& format
== GL_RGBA
);
292 return PIPE_FORMAT_R11G11B10_FLOAT
;
294 case GL_UNSIGNED_BYTE
:
295 if (format
== GL_BGRA
) {
296 /* this is an odd-ball case */
298 return PIPE_FORMAT_B8G8R8A8_UNORM
;
303 index
= integer
*2 + normalized
;
305 assert(type
>= GL_BYTE
&& type
<= GL_FIXED
);
306 return vertex_formats
[type
- GL_BYTE
][index
][size
-1];
309 static const struct gl_vertex_array
*
310 get_client_array(const struct gl_vertex_array
*arrays
,
313 /* st_program uses 0xffffffff to denote a double placeholder attribute */
314 if (mesaAttr
== ST_DOUBLE_ATTRIB_PLACEHOLDER
)
316 return &arrays
[mesaAttr
];
320 * Examine the active arrays to determine if we have interleaved
321 * vertex arrays all living in one VBO, or all living in user space.
324 is_interleaved_arrays(const struct st_vertex_program
*vp
,
325 const struct gl_vertex_array
*arrays
,
329 const struct gl_buffer_object
*firstBufObj
= NULL
;
330 GLint firstStride
= -1;
331 const GLubyte
*firstPtr
= NULL
;
332 GLboolean userSpaceBuffer
= GL_FALSE
;
334 for (attr
= 0; attr
< num_inputs
; attr
++) {
335 const struct gl_vertex_array
*array
;
336 const struct gl_vertex_buffer_binding
*binding
;
337 const struct gl_array_attributes
*attrib
;
339 const struct gl_buffer_object
*bufObj
;
342 array
= get_client_array(arrays
, vp
->index_to_input
[attr
]);
346 binding
= array
->BufferBinding
;
347 attrib
= array
->VertexAttrib
;
348 stride
= binding
->Stride
; /* in bytes */
349 ptr
= _mesa_vertex_attrib_address(attrib
, binding
);
351 /* To keep things simple, don't allow interleaved zero-stride attribs. */
355 bufObj
= binding
->BufferObj
;
357 /* save info about the first array */
358 firstStride
= stride
;
360 firstBufObj
= bufObj
;
361 userSpaceBuffer
= !_mesa_is_bufferobj(bufObj
);
364 /* check if other arrays interleave with the first, in same buffer */
365 if (stride
!= firstStride
)
366 return GL_FALSE
; /* strides don't match */
368 if (bufObj
!= firstBufObj
)
369 return GL_FALSE
; /* arrays in different VBOs */
371 if (llabs(ptr
- firstPtr
) > firstStride
)
372 return GL_FALSE
; /* arrays start too far apart */
374 if ((!_mesa_is_bufferobj(bufObj
)) != userSpaceBuffer
)
375 return GL_FALSE
; /* mix of VBO and user-space arrays */
382 static void init_velement(struct pipe_vertex_element
*velement
,
383 int src_offset
, int format
,
384 int instance_divisor
, int vbo_index
)
386 velement
->src_offset
= src_offset
;
387 velement
->src_format
= format
;
388 velement
->instance_divisor
= instance_divisor
;
389 velement
->vertex_buffer_index
= vbo_index
;
390 assert(velement
->src_format
);
393 static void init_velement_lowered(const struct st_vertex_program
*vp
,
394 struct pipe_vertex_element
*velements
,
395 int src_offset
, int format
,
396 int instance_divisor
, int vbo_index
,
397 int nr_components
, GLboolean doubles
,
404 if (nr_components
< 2)
405 lower_format
= PIPE_FORMAT_R32G32_UINT
;
407 lower_format
= PIPE_FORMAT_R32G32B32A32_UINT
;
409 init_velement(&velements
[idx
], src_offset
,
410 lower_format
, instance_divisor
, vbo_index
);
413 if (idx
< vp
->num_inputs
&&
414 vp
->index_to_input
[idx
] == ST_DOUBLE_ATTRIB_PLACEHOLDER
) {
415 if (nr_components
>= 3) {
416 if (nr_components
== 3)
417 lower_format
= PIPE_FORMAT_R32G32_UINT
;
419 lower_format
= PIPE_FORMAT_R32G32B32A32_UINT
;
421 init_velement(&velements
[idx
], src_offset
+ 4 * sizeof(float),
422 lower_format
, instance_divisor
, vbo_index
);
424 /* The values here are undefined. Fill in some conservative
427 init_velement(&velements
[idx
], src_offset
, PIPE_FORMAT_R32G32_UINT
,
428 instance_divisor
, vbo_index
);
434 init_velement(&velements
[idx
], src_offset
,
435 format
, instance_divisor
, vbo_index
);
442 set_vertex_attribs(struct st_context
*st
,
443 struct pipe_vertex_buffer
*vbuffers
,
444 unsigned num_vbuffers
,
445 struct pipe_vertex_element
*velements
,
446 unsigned num_velements
)
448 struct cso_context
*cso
= st
->cso_context
;
450 cso_set_vertex_buffers(cso
, 0, num_vbuffers
, vbuffers
);
451 if (st
->last_num_vbuffers
> num_vbuffers
) {
452 /* Unbind remaining buffers, if any. */
453 cso_set_vertex_buffers(cso
, num_vbuffers
,
454 st
->last_num_vbuffers
- num_vbuffers
, NULL
);
456 st
->last_num_vbuffers
= num_vbuffers
;
457 cso_set_vertex_elements(cso
, num_velements
, velements
);
461 * Set up for drawing interleaved arrays that all live in one VBO
462 * or all live in user space.
463 * \param vbuffer returns vertex buffer info
464 * \param velements returns vertex element info
467 setup_interleaved_attribs(struct st_context
*st
,
468 const struct st_vertex_program
*vp
,
469 const struct gl_vertex_array
*arrays
,
472 struct pipe_vertex_buffer vbuffer
;
473 struct pipe_vertex_element velements
[PIPE_MAX_ATTRIBS
] = {{0}};
475 const GLubyte
*low_addr
= NULL
;
476 GLboolean usingVBO
; /* all arrays in a VBO? */
477 struct gl_buffer_object
*bufobj
;
480 /* Find the lowest address of the arrays we're drawing,
481 * Init bufobj and stride.
484 const struct gl_vertex_array
*array
;
485 const struct gl_vertex_buffer_binding
*binding
;
486 const struct gl_array_attributes
*attrib
;
488 array
= get_client_array(arrays
, vp
->index_to_input
[0]);
491 binding
= array
->BufferBinding
;
492 attrib
= array
->VertexAttrib
;
494 /* Since we're doing interleaved arrays, we know there'll be at most
495 * one buffer object and the stride will be the same for all arrays.
498 bufobj
= binding
->BufferObj
;
499 stride
= binding
->Stride
;
501 low_addr
= _mesa_vertex_attrib_address(attrib
, binding
);
503 for (attr
= 1; attr
< num_inputs
; attr
++) {
504 const GLubyte
*start
;
505 array
= get_client_array(arrays
, vp
->index_to_input
[attr
]);
508 binding
= array
->BufferBinding
;
509 attrib
= array
->VertexAttrib
;
510 start
= _mesa_vertex_attrib_address(attrib
, binding
);
511 low_addr
= MIN2(low_addr
, start
);
515 /* not sure we'll ever have zero inputs, but play it safe */
521 /* are the arrays in user space? */
522 usingVBO
= _mesa_is_bufferobj(bufobj
);
524 for (attr
= 0; attr
< num_inputs
;) {
525 const struct gl_vertex_array
*array
;
526 const struct gl_vertex_buffer_binding
*binding
;
527 const struct gl_array_attributes
*attrib
;
532 array
= get_client_array(arrays
, vp
->index_to_input
[attr
]);
535 binding
= array
->BufferBinding
;
536 attrib
= array
->VertexAttrib
;
537 ptr
= _mesa_vertex_attrib_address(attrib
, binding
);
539 src_offset
= (unsigned) (ptr
- low_addr
);
541 src_format
= st_pipe_vertex_format(attrib
);
543 init_velement_lowered(vp
, velements
, src_offset
, src_format
,
544 binding
->InstanceDivisor
, 0,
545 attrib
->Size
, attrib
->Doubles
, &attr
);
549 * Return the vbuffer info and setup user-space attrib info, if needed.
551 if (num_inputs
== 0) {
552 /* just defensive coding here */
553 vbuffer
.buffer
.resource
= NULL
;
554 vbuffer
.is_user_buffer
= false;
555 vbuffer
.buffer_offset
= 0;
559 /* all interleaved arrays in a VBO */
560 struct st_buffer_object
*stobj
= st_buffer_object(bufobj
);
562 if (!stobj
|| !stobj
->buffer
) {
563 st
->vertex_array_out_of_memory
= true;
564 return; /* out-of-memory error probably */
567 vbuffer
.buffer
.resource
= stobj
->buffer
;
568 vbuffer
.is_user_buffer
= false;
569 vbuffer
.buffer_offset
= pointer_to_offset(low_addr
);
570 vbuffer
.stride
= stride
;
573 /* all interleaved arrays in user memory */
574 vbuffer
.buffer
.user
= low_addr
;
575 vbuffer
.is_user_buffer
= !!low_addr
; /* if NULL, then unbind */
576 vbuffer
.buffer_offset
= 0;
577 vbuffer
.stride
= stride
;
580 st
->draw_needs_minmax_index
= true;
583 set_vertex_attribs(st
, &vbuffer
, num_inputs
? 1 : 0,
584 velements
, num_inputs
);
588 * Set up a separate pipe_vertex_buffer and pipe_vertex_element for each
590 * \param vbuffer returns vertex buffer info
591 * \param velements returns vertex element info
594 setup_non_interleaved_attribs(struct st_context
*st
,
595 const struct st_vertex_program
*vp
,
596 const struct gl_vertex_array
*arrays
,
599 struct gl_context
*ctx
= st
->ctx
;
600 struct pipe_vertex_buffer vbuffer
[PIPE_MAX_ATTRIBS
];
601 struct pipe_vertex_element velements
[PIPE_MAX_ATTRIBS
] = {{0}};
602 unsigned num_vbuffers
= 0;
603 unsigned unref_buffers
= 0;
606 for (attr
= 0; attr
< num_inputs
;) {
607 const unsigned mesaAttr
= vp
->index_to_input
[attr
];
608 const struct gl_vertex_array
*array
;
609 const struct gl_vertex_buffer_binding
*binding
;
610 const struct gl_array_attributes
*attrib
;
611 struct gl_buffer_object
*bufobj
;
616 array
= get_client_array(arrays
, mesaAttr
);
619 bufidx
= num_vbuffers
++;
621 binding
= array
->BufferBinding
;
622 attrib
= array
->VertexAttrib
;
623 stride
= binding
->Stride
;
624 bufobj
= binding
->BufferObj
;
626 if (_mesa_is_bufferobj(bufobj
)) {
627 /* Attribute data is in a VBO.
628 * Recall that for VBOs, the gl_vertex_array->Ptr field is
629 * really an offset from the start of the VBO, not a pointer.
631 struct st_buffer_object
*stobj
= st_buffer_object(bufobj
);
633 if (!stobj
|| !stobj
->buffer
) {
634 st
->vertex_array_out_of_memory
= true;
635 return; /* out-of-memory error probably */
638 vbuffer
[bufidx
].buffer
.resource
= stobj
->buffer
;
639 vbuffer
[bufidx
].is_user_buffer
= false;
640 vbuffer
[bufidx
].buffer_offset
=
641 binding
->Offset
+ attrib
->RelativeOffset
;
645 unsigned size
= attrib
->_ElementSize
;
646 /* This is optimal for GPU cache line usage if the upload size
647 * is <= cache line size.
649 unsigned alignment
= util_next_power_of_two(size
);
652 vbuffer
[bufidx
].buffer
.user
= attrib
->Ptr
;
653 void *ptr
= attrib
->Ptr
? (void*)attrib
->Ptr
:
654 (void*)ctx
->Current
.Attrib
[mesaAttr
];
656 vbuffer
[bufidx
].is_user_buffer
= false;
657 vbuffer
[bufidx
].buffer
.resource
= NULL
;
659 /* Use const_uploader for zero-stride vertex attributes, because
660 * it may use a better memory placement than stream_uploader.
661 * The reason is that zero-stride attributes can be fetched many
662 * times (thousands of times), so a better placement is going to
665 * Upload the maximum possible size, which is 4x GLdouble = 32.
667 u_upload_data(st
->can_bind_const_buffer_as_vertex
?
668 st
->pipe
->const_uploader
:
669 st
->pipe
->stream_uploader
,
670 0, size
, alignment
, ptr
,
671 &vbuffer
[bufidx
].buffer_offset
,
672 &vbuffer
[bufidx
].buffer
.resource
);
673 unref_buffers
|= 1u << bufidx
;
676 vbuffer
[bufidx
].buffer
.user
= attrib
->Ptr
;
677 vbuffer
[bufidx
].is_user_buffer
= true;
678 vbuffer
[bufidx
].buffer_offset
= 0;
680 if (!binding
->InstanceDivisor
)
681 st
->draw_needs_minmax_index
= true;
685 /* common-case setup */
686 vbuffer
[bufidx
].stride
= stride
; /* in bytes */
688 src_format
= st_pipe_vertex_format(attrib
);
690 init_velement_lowered(vp
, velements
, 0, src_format
,
691 binding
->InstanceDivisor
, bufidx
,
692 attrib
->Size
, attrib
->Doubles
, &attr
);
695 if (!ctx
->Const
.AllowMappedBuffersDuringExecution
) {
696 u_upload_unmap(st
->pipe
->stream_uploader
);
699 set_vertex_attribs(st
, vbuffer
, num_vbuffers
, velements
, num_inputs
);
701 /* Unreference uploaded zero-stride vertex buffers. */
702 while (unref_buffers
) {
703 unsigned i
= u_bit_scan(&unref_buffers
);
704 pipe_resource_reference(&vbuffer
[i
].buffer
.resource
, NULL
);
708 void st_update_array(struct st_context
*st
)
710 struct gl_context
*ctx
= st
->ctx
;
711 const struct gl_vertex_array
*arrays
= ctx
->Array
._DrawArrays
;
712 const struct st_vertex_program
*vp
;
715 st
->vertex_array_out_of_memory
= FALSE
;
716 st
->draw_needs_minmax_index
= false;
718 /* No drawing has been done yet, so do nothing. */
722 /* vertex program validation must be done before this */
724 num_inputs
= st
->vp_variant
->num_inputs
;
726 if (is_interleaved_arrays(vp
, arrays
, num_inputs
))
727 setup_interleaved_attribs(st
, vp
, arrays
, num_inputs
);
729 setup_non_interleaved_attribs(st
, vp
, arrays
, num_inputs
);