src/mesa/state_tracker/st_atom_array.c

/**************************************************************************
 *
 * Copyright 2007 VMware, Inc.
 * Copyright 2012 Marek Olšák <maraeo@gmail.com>
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

/*
 * This converts the VBO's vertex attribute/array information into
 * Gallium vertex state and binds it.
 *
 * Authors:
 *   Keith Whitwell <keithw@vmware.com>
 *   Marek Olšák <maraeo@gmail.com>
 */

#include "st_context.h"
#include "st_atom.h"
#include "st_cb_bufferobjects.h"
#include "st_draw.h"
#include "st_program.h"

#include "cso_cache/cso_context.h"
#include "util/u_math.h"
#include "main/bufferobj.h"
#include "main/glformats.h"


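/* Lookup tables mapping a GL component type to the corresponding pipe
 * format, indexed by component count minus one.
 */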
static GLuint double_types[4] = {
   PIPE_FORMAT_R64_FLOAT,
   PIPE_FORMAT_R64G64_FLOAT,
   PIPE_FORMAT_R64G64B64_FLOAT,
   PIPE_FORMAT_R64G64B64A64_FLOAT
};

static GLuint float_types[4] = {
   PIPE_FORMAT_R32_FLOAT,
   PIPE_FORMAT_R32G32_FLOAT,
   PIPE_FORMAT_R32G32B32_FLOAT,
   PIPE_FORMAT_R32G32B32A32_FLOAT
};

static GLuint half_float_types[4] = {
   PIPE_FORMAT_R16_FLOAT,
   PIPE_FORMAT_R16G16_FLOAT,
   PIPE_FORMAT_R16G16B16_FLOAT,
   PIPE_FORMAT_R16G16B16A16_FLOAT
};

static GLuint uint_types_norm[4] = {
   PIPE_FORMAT_R32_UNORM,
   PIPE_FORMAT_R32G32_UNORM,
   PIPE_FORMAT_R32G32B32_UNORM,
   PIPE_FORMAT_R32G32B32A32_UNORM
};

static GLuint uint_types_scale[4] = {
   PIPE_FORMAT_R32_USCALED,
   PIPE_FORMAT_R32G32_USCALED,
   PIPE_FORMAT_R32G32B32_USCALED,
   PIPE_FORMAT_R32G32B32A32_USCALED
};

static GLuint uint_types_int[4] = {
   PIPE_FORMAT_R32_UINT,
   PIPE_FORMAT_R32G32_UINT,
   PIPE_FORMAT_R32G32B32_UINT,
   PIPE_FORMAT_R32G32B32A32_UINT
};

static GLuint int_types_norm[4] = {
   PIPE_FORMAT_R32_SNORM,
   PIPE_FORMAT_R32G32_SNORM,
   PIPE_FORMAT_R32G32B32_SNORM,
   PIPE_FORMAT_R32G32B32A32_SNORM
};

static GLuint int_types_scale[4] = {
   PIPE_FORMAT_R32_SSCALED,
   PIPE_FORMAT_R32G32_SSCALED,
   PIPE_FORMAT_R32G32B32_SSCALED,
   PIPE_FORMAT_R32G32B32A32_SSCALED
};

static GLuint int_types_int[4] = {
   PIPE_FORMAT_R32_SINT,
   PIPE_FORMAT_R32G32_SINT,
   PIPE_FORMAT_R32G32B32_SINT,
   PIPE_FORMAT_R32G32B32A32_SINT
};

static GLuint ushort_types_norm[4] = {
   PIPE_FORMAT_R16_UNORM,
   PIPE_FORMAT_R16G16_UNORM,
   PIPE_FORMAT_R16G16B16_UNORM,
   PIPE_FORMAT_R16G16B16A16_UNORM
};

static GLuint ushort_types_scale[4] = {
   PIPE_FORMAT_R16_USCALED,
   PIPE_FORMAT_R16G16_USCALED,
   PIPE_FORMAT_R16G16B16_USCALED,
   PIPE_FORMAT_R16G16B16A16_USCALED
};

static GLuint ushort_types_int[4] = {
   PIPE_FORMAT_R16_UINT,
   PIPE_FORMAT_R16G16_UINT,
   PIPE_FORMAT_R16G16B16_UINT,
   PIPE_FORMAT_R16G16B16A16_UINT
};

static GLuint short_types_norm[4] = {
   PIPE_FORMAT_R16_SNORM,
   PIPE_FORMAT_R16G16_SNORM,
   PIPE_FORMAT_R16G16B16_SNORM,
   PIPE_FORMAT_R16G16B16A16_SNORM
};

static GLuint short_types_scale[4] = {
   PIPE_FORMAT_R16_SSCALED,
   PIPE_FORMAT_R16G16_SSCALED,
   PIPE_FORMAT_R16G16B16_SSCALED,
   PIPE_FORMAT_R16G16B16A16_SSCALED
};

static GLuint short_types_int[4] = {
   PIPE_FORMAT_R16_SINT,
   PIPE_FORMAT_R16G16_SINT,
   PIPE_FORMAT_R16G16B16_SINT,
   PIPE_FORMAT_R16G16B16A16_SINT
};

static GLuint ubyte_types_norm[4] = {
   PIPE_FORMAT_R8_UNORM,
   PIPE_FORMAT_R8G8_UNORM,
   PIPE_FORMAT_R8G8B8_UNORM,
   PIPE_FORMAT_R8G8B8A8_UNORM
};

static GLuint ubyte_types_scale[4] = {
   PIPE_FORMAT_R8_USCALED,
   PIPE_FORMAT_R8G8_USCALED,
   PIPE_FORMAT_R8G8B8_USCALED,
   PIPE_FORMAT_R8G8B8A8_USCALED
};

static GLuint ubyte_types_int[4] = {
   PIPE_FORMAT_R8_UINT,
   PIPE_FORMAT_R8G8_UINT,
   PIPE_FORMAT_R8G8B8_UINT,
   PIPE_FORMAT_R8G8B8A8_UINT
};

static GLuint byte_types_norm[4] = {
   PIPE_FORMAT_R8_SNORM,
   PIPE_FORMAT_R8G8_SNORM,
   PIPE_FORMAT_R8G8B8_SNORM,
   PIPE_FORMAT_R8G8B8A8_SNORM
};

static GLuint byte_types_scale[4] = {
   PIPE_FORMAT_R8_SSCALED,
   PIPE_FORMAT_R8G8_SSCALED,
   PIPE_FORMAT_R8G8B8_SSCALED,
   PIPE_FORMAT_R8G8B8A8_SSCALED
};

static GLuint byte_types_int[4] = {
   PIPE_FORMAT_R8_SINT,
   PIPE_FORMAT_R8G8_SINT,
   PIPE_FORMAT_R8G8B8_SINT,
   PIPE_FORMAT_R8G8B8A8_SINT
};

static GLuint fixed_types[4] = {
   PIPE_FORMAT_R32_FIXED,
   PIPE_FORMAT_R32G32_FIXED,
   PIPE_FORMAT_R32G32B32_FIXED,
   PIPE_FORMAT_R32G32B32A32_FIXED
};


/**
 * Return the PIPE_FORMAT_x that matches the given GL datatype, component
 * count, component layout (GL_RGBA vs. GL_BGRA) and normalized/integer
 * flags.  For example, a normalized 4-component GL_UNSIGNED_BYTE RGBA
 * attribute maps to PIPE_FORMAT_R8G8B8A8_UNORM.
 */
enum pipe_format
st_pipe_vertex_format(GLenum type, GLuint size, GLenum format,
                      GLboolean normalized, GLboolean integer)
{
   assert((type >= GL_BYTE && type <= GL_DOUBLE) ||
          type == GL_FIXED || type == GL_HALF_FLOAT ||
          type == GL_INT_2_10_10_10_REV ||
          type == GL_UNSIGNED_INT_2_10_10_10_REV ||
          type == GL_UNSIGNED_INT_10F_11F_11F_REV);
   assert(size >= 1);
   assert(size <= 4);
   assert(format == GL_RGBA || format == GL_BGRA);

   if (type == GL_INT_2_10_10_10_REV ||
       type == GL_UNSIGNED_INT_2_10_10_10_REV) {
      assert(size == 4);
      assert(!integer);

      if (format == GL_BGRA) {
         if (type == GL_INT_2_10_10_10_REV) {
            if (normalized)
               return PIPE_FORMAT_B10G10R10A2_SNORM;
            else
               return PIPE_FORMAT_B10G10R10A2_SSCALED;
         } else {
            if (normalized)
               return PIPE_FORMAT_B10G10R10A2_UNORM;
            else
               return PIPE_FORMAT_B10G10R10A2_USCALED;
         }
      } else {
         if (type == GL_INT_2_10_10_10_REV) {
            if (normalized)
               return PIPE_FORMAT_R10G10B10A2_SNORM;
            else
               return PIPE_FORMAT_R10G10B10A2_SSCALED;
         } else {
            if (normalized)
               return PIPE_FORMAT_R10G10B10A2_UNORM;
            else
               return PIPE_FORMAT_R10G10B10A2_USCALED;
         }
      }
   }

   if (type == GL_UNSIGNED_INT_10F_11F_11F_REV) {
      assert(size == 3);
      assert(!integer);
      assert(format == GL_RGBA);

      return PIPE_FORMAT_R11G11B10_FLOAT;
   }

   if (format == GL_BGRA) {
      /* this is an odd-ball case */
      assert(type == GL_UNSIGNED_BYTE);
      assert(normalized);
      return PIPE_FORMAT_B8G8R8A8_UNORM;
   }

   if (integer) {
      switch (type) {
      case GL_INT: return int_types_int[size-1];
      case GL_SHORT: return short_types_int[size-1];
      case GL_BYTE: return byte_types_int[size-1];
      case GL_UNSIGNED_INT: return uint_types_int[size-1];
      case GL_UNSIGNED_SHORT: return ushort_types_int[size-1];
      case GL_UNSIGNED_BYTE: return ubyte_types_int[size-1];
      default: assert(0); return 0;
      }
   }
   else if (normalized) {
      switch (type) {
      case GL_DOUBLE: return double_types[size-1];
      case GL_FLOAT: return float_types[size-1];
      case GL_HALF_FLOAT: return half_float_types[size-1];
      case GL_INT: return int_types_norm[size-1];
      case GL_SHORT: return short_types_norm[size-1];
      case GL_BYTE: return byte_types_norm[size-1];
      case GL_UNSIGNED_INT: return uint_types_norm[size-1];
      case GL_UNSIGNED_SHORT: return ushort_types_norm[size-1];
      case GL_UNSIGNED_BYTE: return ubyte_types_norm[size-1];
      case GL_FIXED: return fixed_types[size-1];
      default: assert(0); return 0;
      }
   }
   else {
      switch (type) {
      case GL_DOUBLE: return double_types[size-1];
      case GL_FLOAT: return float_types[size-1];
      case GL_HALF_FLOAT: return half_float_types[size-1];
      case GL_INT: return int_types_scale[size-1];
      case GL_SHORT: return short_types_scale[size-1];
      case GL_BYTE: return byte_types_scale[size-1];
      case GL_UNSIGNED_INT: return uint_types_scale[size-1];
      case GL_UNSIGNED_SHORT: return ushort_types_scale[size-1];
      case GL_UNSIGNED_BYTE: return ubyte_types_scale[size-1];
      case GL_FIXED: return fixed_types[size-1];
      default: assert(0); return 0;
      }
   }
   return PIPE_FORMAT_NONE; /* silence compiler warning */
}

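/**
 * Return the gl_client_array for the given vertex shader input, or NULL if
 * the input is the placeholder slot used for the second half of a double
 * attribute (see ST_DOUBLE_ATTRIB_PLACEHOLDER).
 */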
static const struct gl_client_array *
get_client_array(const struct st_vertex_program *vp,
                 const struct gl_client_array **arrays,
                 int attr)
{
   const GLuint mesaAttr = vp->index_to_input[attr];
   /* st_program uses 0xffffffff to denote a double placeholder attribute */
   if (mesaAttr == ST_DOUBLE_ATTRIB_PLACEHOLDER)
      return NULL;
   return arrays[mesaAttr];
}

/**
 * Examine the active arrays to determine if we have interleaved
 * vertex arrays all living in one VBO, or all living in user space.
 */
static GLboolean
is_interleaved_arrays(const struct st_vertex_program *vp,
                      const struct st_vp_variant *vpv,
                      const struct gl_client_array **arrays)
{
   GLuint attr;
   const struct gl_buffer_object *firstBufObj = NULL;
   GLint firstStride = -1;
   const GLubyte *firstPtr = NULL;
   GLboolean userSpaceBuffer = GL_FALSE;

   for (attr = 0; attr < vpv->num_inputs; attr++) {
      const struct gl_client_array *array;
      const struct gl_buffer_object *bufObj;
      GLsizei stride;

      array = get_client_array(vp, arrays, attr);
      if (!array)
         continue;

      stride = array->StrideB; /* in bytes */
      bufObj = array->BufferObj;
      if (attr == 0) {
         /* save info about the first array */
         firstStride = stride;
         firstPtr = array->Ptr;
         firstBufObj = bufObj;
         userSpaceBuffer = !bufObj || !bufObj->Name;
      }
      else {
         /* check if other arrays interleave with the first, in same buffer */
         if (stride != firstStride)
            return GL_FALSE; /* strides don't match */

         if (bufObj != firstBufObj)
            return GL_FALSE; /* arrays in different VBOs */

         if (llabs(array->Ptr - firstPtr) > firstStride)
            return GL_FALSE; /* arrays start too far apart */

         if ((!_mesa_is_bufferobj(bufObj)) != userSpaceBuffer)
            return GL_FALSE; /* mix of VBO and user-space arrays */
      }
   }

   return GL_TRUE;
}

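/**
 * Fill in a single pipe_vertex_element from the given parameters.
 */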
static void init_velement(struct pipe_vertex_element *velement,
                          int src_offset, int format,
                          int instance_divisor, int vbo_index)
{
   velement->src_offset = src_offset;
   velement->src_format = format;
   velement->instance_divisor = instance_divisor;
   velement->vertex_buffer_index = vbo_index;
   assert(velement->src_format);
}

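/**
 * Set up the pipe_vertex_element(s) for one vertex attribute and advance
 * *attr_idx past them.  Double-precision attributes are lowered to 32-bit
 * uint elements: one R32G32(B32A32)_UINT element covers the first one or
 * two doubles, and a dvec3/dvec4 gets a second element for the remaining
 * components.
 */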
static void init_velement_lowered(struct st_context *st,
                                  struct pipe_vertex_element *velements,
                                  int src_offset, int format,
                                  int instance_divisor, int vbo_index,
                                  int nr_components, GLboolean doubles,
                                  GLuint *attr_idx)
{
   int idx = *attr_idx;
   if (doubles) {
      int lower_format;

      if (nr_components == 1)
         lower_format = PIPE_FORMAT_R32G32_UINT;
      else if (nr_components >= 2)
         lower_format = PIPE_FORMAT_R32G32B32A32_UINT;

      init_velement(&velements[idx], src_offset,
                    lower_format, instance_divisor, vbo_index);
      idx++;

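      /* Upper half of a dvec3/dvec4: a second element placed 16 bytes
       * (two doubles) past the first.
       */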
      if (nr_components > 2) {
         if (nr_components == 3)
            lower_format = PIPE_FORMAT_R32G32_UINT;
         else if (nr_components >= 4)
            lower_format = PIPE_FORMAT_R32G32B32A32_UINT;

         init_velement(&velements[idx], src_offset + 4 * sizeof(float),
                       lower_format, instance_divisor, vbo_index);
         idx++;
      }
   } else {
      init_velement(&velements[idx], src_offset,
                    format, instance_divisor, vbo_index);
      idx++;
   }
   *attr_idx = idx;
}

/**
 * Set up for drawing interleaved arrays that all live in one VBO
 * or all live in user space.
 * \param vbuffer  returns vertex buffer info
 * \param velements  returns vertex element info
 */
static boolean
setup_interleaved_attribs(struct st_context *st,
                          const struct st_vertex_program *vp,
                          const struct st_vp_variant *vpv,
                          const struct gl_client_array **arrays,
                          struct pipe_vertex_buffer *vbuffer,
                          struct pipe_vertex_element velements[],
                          unsigned *num_velements)
{
   GLuint attr, attr_idx;
   const GLubyte *low_addr = NULL;
   GLboolean usingVBO;      /* all arrays in a VBO? */
   struct gl_buffer_object *bufobj;
   GLsizei stride;

   /* Find the lowest address of the arrays we're drawing,
    * Init bufobj and stride.
    */
   if (vpv->num_inputs) {
      const struct gl_client_array *array;

      array = get_client_array(vp, arrays, 0);
      assert(array);

      /* Since we're doing interleaved arrays, we know there'll be at most
       * one buffer object and the stride will be the same for all arrays.
       * Grab them now.
       */
      bufobj = array->BufferObj;
      stride = array->StrideB;

      low_addr = arrays[vp->index_to_input[0]]->Ptr;

      for (attr = 1; attr < vpv->num_inputs; attr++) {
         const GLubyte *start;
         array = get_client_array(vp, arrays, attr);
         if (!array)
            continue;
         start = array->Ptr;
         low_addr = MIN2(low_addr, start);
      }
   }
   else {
      /* not sure we'll ever have zero inputs, but play it safe */
      bufobj = NULL;
      stride = 0;
      low_addr = 0;
   }

   /* are the arrays in a real VBO (as opposed to user memory)? */
   usingVBO = _mesa_is_bufferobj(bufobj);

   attr_idx = 0;
   for (attr = 0; attr < vpv->num_inputs; attr++) {
      const struct gl_client_array *array;
      unsigned src_offset;
      unsigned src_format;

      array = get_client_array(vp, arrays, attr);
      if (!array)
         continue;

      src_offset = (unsigned) (array->Ptr - low_addr);
      assert(array->_ElementSize ==
             _mesa_bytes_per_vertex_attrib(array->Size, array->Type));

      src_format = st_pipe_vertex_format(array->Type,
                                         array->Size,
                                         array->Format,
                                         array->Normalized,
                                         array->Integer);

      init_velement_lowered(st, velements, src_offset, src_format,
                            array->InstanceDivisor, 0,
                            array->Size, array->Doubles, &attr_idx);
   }

   *num_velements = attr_idx;

   /*
    * Return the vbuffer info and setup user-space attrib info, if needed.
    */
   if (vpv->num_inputs == 0) {
      /* just defensive coding here */
      vbuffer->buffer = NULL;
      vbuffer->user_buffer = NULL;
      vbuffer->buffer_offset = 0;
      vbuffer->stride = 0;
   }
   else if (usingVBO) {
      /* all interleaved arrays in a VBO */
      struct st_buffer_object *stobj = st_buffer_object(bufobj);

      if (!stobj || !stobj->buffer) {
         return FALSE; /* out-of-memory error probably */
      }

      vbuffer->buffer = stobj->buffer;
      vbuffer->user_buffer = NULL;
      vbuffer->buffer_offset = pointer_to_offset(low_addr);
      vbuffer->stride = stride;
   }
   else {
      /* all interleaved arrays in user memory */
      vbuffer->buffer = NULL;
      vbuffer->user_buffer = low_addr;
      vbuffer->buffer_offset = 0;
      vbuffer->stride = stride;
   }
   return TRUE;
}

/**
 * Set up a separate pipe_vertex_buffer and pipe_vertex_element for each
 * vertex attribute.
 * \param vbuffer  returns vertex buffer info
 * \param velements  returns vertex element info
 */
static boolean
setup_non_interleaved_attribs(struct st_context *st,
                              const struct st_vertex_program *vp,
                              const struct st_vp_variant *vpv,
                              const struct gl_client_array **arrays,
                              struct pipe_vertex_buffer vbuffer[],
                              struct pipe_vertex_element velements[],
                              unsigned *num_velements)
{
   struct gl_context *ctx = st->ctx;
   GLuint attr, attr_idx = 0;

   for (attr = 0; attr < vpv->num_inputs; attr++) {
      const GLuint mesaAttr = vp->index_to_input[attr];
      const struct gl_client_array *array;
      struct gl_buffer_object *bufobj;
      GLsizei stride;
      unsigned src_format;

      array = get_client_array(vp, arrays, attr);
      if (!array) {
         vbuffer[attr].buffer = NULL;
         vbuffer[attr].user_buffer = NULL;
         vbuffer[attr].buffer_offset = 0;
         continue;
      }

      stride = array->StrideB;
      bufobj = array->BufferObj;
      assert(array->_ElementSize ==
             _mesa_bytes_per_vertex_attrib(array->Size, array->Type));

      if (_mesa_is_bufferobj(bufobj)) {
         /* Attribute data is in a VBO.
          * Recall that for VBOs, the gl_client_array->Ptr field is
          * really an offset from the start of the VBO, not a pointer.
          */
         struct st_buffer_object *stobj = st_buffer_object(bufobj);

         if (!stobj || !stobj->buffer) {
            return FALSE; /* out-of-memory error probably */
         }

         vbuffer[attr].buffer = stobj->buffer;
         vbuffer[attr].user_buffer = NULL;
         vbuffer[attr].buffer_offset = pointer_to_offset(array->Ptr);
      }
      else {
         /* wrap user data */
         void *ptr;

         if (array->Ptr) {
            ptr = (void *) array->Ptr;
         }
         else {
            /* no array, use ctx->Current.Attrib[] value */
            ptr = (void *) ctx->Current.Attrib[mesaAttr];
            stride = 0;
         }

         assert(ptr);

         vbuffer[attr].buffer = NULL;
         vbuffer[attr].user_buffer = ptr;
         vbuffer[attr].buffer_offset = 0;
      }

      /* common-case setup */
      vbuffer[attr].stride = stride; /* in bytes */

      src_format = st_pipe_vertex_format(array->Type,
                                         array->Size,
                                         array->Format,
                                         array->Normalized,
                                         array->Integer);

      init_velement_lowered(st, velements, 0, src_format,
                            array->InstanceDivisor, attr,
                            array->Size, array->Doubles, &attr_idx);
   }

   *num_velements = attr_idx;
   return TRUE;
}

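/**
 * Translate the current GL vertex arrays into gallium vertex buffers and
 * vertex elements, and bind them through the CSO context.  Runs as the
 * update callback of the st_update_array state atom during state
 * validation, before drawing.
 */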
static void update_array(struct st_context *st)
{
   struct gl_context *ctx = st->ctx;
   const struct gl_client_array **arrays = ctx->Array._DrawArrays;
   const struct st_vertex_program *vp;
   const struct st_vp_variant *vpv;
   struct pipe_vertex_buffer vbuffer[PIPE_MAX_SHADER_INPUTS];
   struct pipe_vertex_element velements[PIPE_MAX_ATTRIBS];
   unsigned num_vbuffers, num_velements;

   st->vertex_array_out_of_memory = FALSE;

   /* No drawing has been done yet, so do nothing. */
   if (!arrays)
      return;

   /* vertex program validation must be done before this */
   vp = st->vp;
   vpv = st->vp_variant;

   memset(velements, 0, sizeof(struct pipe_vertex_element) * vpv->num_inputs);

   /*
    * Setup the vbuffer[] and velements[] arrays.
    */
   if (is_interleaved_arrays(vp, vpv, arrays)) {
      if (!setup_interleaved_attribs(st, vp, vpv, arrays, vbuffer, velements,
                                     &num_velements)) {
         st->vertex_array_out_of_memory = TRUE;
         return;
      }

      num_vbuffers = 1;
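      /* If no vertex elements were set up, don't bind the interleaved
       * buffer either.
       */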
      if (num_velements == 0)
         num_vbuffers = 0;
   }
   else {
      if (!setup_non_interleaved_attribs(st, vp, vpv, arrays, vbuffer,
                                         velements, &num_velements)) {
         st->vertex_array_out_of_memory = TRUE;
         return;
      }

      num_vbuffers = vpv->num_inputs;
   }

   cso_set_vertex_buffers(st->cso_context, 0, num_vbuffers, vbuffer);
   if (st->last_num_vbuffers > num_vbuffers) {
      /* Unbind remaining buffers, if any. */
      cso_set_vertex_buffers(st->cso_context, num_vbuffers,
                             st->last_num_vbuffers - num_vbuffers, NULL);
   }
   st->last_num_vbuffers = num_vbuffers;
   cso_set_vertex_elements(st->cso_context, num_velements, velements);
}


const struct st_tracked_state st_update_array = {
   update_array   /* update */
};