mesa: Make gl_vertex_array contain pointers to first order VAO members.
[mesa.git] / src / mesa / state_tracker / st_atom_array.c
1
2 /**************************************************************************
3 *
4 * Copyright 2007 VMware, Inc.
5 * Copyright 2012 Marek Olšák <maraeo@gmail.com>
6 * All Rights Reserved.
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the
10 * "Software"), to deal in the Software without restriction, including
11 * without limitation the rights to use, copy, modify, merge, publish,
12 * distribute, sub license, and/or sell copies of the Software, and to
13 * permit persons to whom the Software is furnished to do so, subject to
14 * the following conditions:
15 *
16 * The above copyright notice and this permission notice (including the
17 * next paragraph) shall be included in all copies or substantial portions
18 * of the Software.
19 *
20 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
21 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
22 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
23 * IN NO EVENT SHALL AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR
24 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
25 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
26 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
27 *
28 **************************************************************************/
29
30 /*
31 * This converts the VBO's vertex attribute/array information into
32 * Gallium vertex state and binds it.
33 *
34 * Authors:
35 * Keith Whitwell <keithw@vmware.com>
36 * Marek Olšák <maraeo@gmail.com>
37 */
38
39 #include "st_context.h"
40 #include "st_atom.h"
41 #include "st_cb_bufferobjects.h"
42 #include "st_draw.h"
43 #include "st_program.h"
44
45 #include "cso_cache/cso_context.h"
46 #include "util/u_math.h"
47 #include "util/u_upload_mgr.h"
48 #include "main/bufferobj.h"
49 #include "main/glformats.h"
50 #include "main/varray.h"
51
/* Table mapping a GL vertex attribute type to a gallium pipe_format.
 *
 * Indexed as: vertex_formats[gltype - GL_BYTE][integer*2 + normalized][size - 1]
 * i.e. the middle index selects 0 = plain/scaled, 1 = normalized, 2 = integer.
 * (integer && normalized is not a legal GL combination, so index 3 is unused.)
 * Entries are uint16_t-narrowed PIPE_FORMAT_x values; a zero entry means
 * "no format" (the GL_2/3/4_BYTES placeholder rows).
 */
static const uint16_t vertex_formats[][4][4] = {
   { /* GL_BYTE */
      {
         PIPE_FORMAT_R8_SSCALED,
         PIPE_FORMAT_R8G8_SSCALED,
         PIPE_FORMAT_R8G8B8_SSCALED,
         PIPE_FORMAT_R8G8B8A8_SSCALED
      },
      {
         PIPE_FORMAT_R8_SNORM,
         PIPE_FORMAT_R8G8_SNORM,
         PIPE_FORMAT_R8G8B8_SNORM,
         PIPE_FORMAT_R8G8B8A8_SNORM
      },
      {
         PIPE_FORMAT_R8_SINT,
         PIPE_FORMAT_R8G8_SINT,
         PIPE_FORMAT_R8G8B8_SINT,
         PIPE_FORMAT_R8G8B8A8_SINT
      },
   },
   { /* GL_UNSIGNED_BYTE */
      {
         PIPE_FORMAT_R8_USCALED,
         PIPE_FORMAT_R8G8_USCALED,
         PIPE_FORMAT_R8G8B8_USCALED,
         PIPE_FORMAT_R8G8B8A8_USCALED
      },
      {
         PIPE_FORMAT_R8_UNORM,
         PIPE_FORMAT_R8G8_UNORM,
         PIPE_FORMAT_R8G8B8_UNORM,
         PIPE_FORMAT_R8G8B8A8_UNORM
      },
      {
         PIPE_FORMAT_R8_UINT,
         PIPE_FORMAT_R8G8_UINT,
         PIPE_FORMAT_R8G8B8_UINT,
         PIPE_FORMAT_R8G8B8A8_UINT
      },
   },
   { /* GL_SHORT */
      {
         PIPE_FORMAT_R16_SSCALED,
         PIPE_FORMAT_R16G16_SSCALED,
         PIPE_FORMAT_R16G16B16_SSCALED,
         PIPE_FORMAT_R16G16B16A16_SSCALED
      },
      {
         PIPE_FORMAT_R16_SNORM,
         PIPE_FORMAT_R16G16_SNORM,
         PIPE_FORMAT_R16G16B16_SNORM,
         PIPE_FORMAT_R16G16B16A16_SNORM
      },
      {
         PIPE_FORMAT_R16_SINT,
         PIPE_FORMAT_R16G16_SINT,
         PIPE_FORMAT_R16G16B16_SINT,
         PIPE_FORMAT_R16G16B16A16_SINT
      },
   },
   { /* GL_UNSIGNED_SHORT */
      {
         PIPE_FORMAT_R16_USCALED,
         PIPE_FORMAT_R16G16_USCALED,
         PIPE_FORMAT_R16G16B16_USCALED,
         PIPE_FORMAT_R16G16B16A16_USCALED
      },
      {
         PIPE_FORMAT_R16_UNORM,
         PIPE_FORMAT_R16G16_UNORM,
         PIPE_FORMAT_R16G16B16_UNORM,
         PIPE_FORMAT_R16G16B16A16_UNORM
      },
      {
         PIPE_FORMAT_R16_UINT,
         PIPE_FORMAT_R16G16_UINT,
         PIPE_FORMAT_R16G16B16_UINT,
         PIPE_FORMAT_R16G16B16A16_UINT
      },
   },
   { /* GL_INT */
      {
         PIPE_FORMAT_R32_SSCALED,
         PIPE_FORMAT_R32G32_SSCALED,
         PIPE_FORMAT_R32G32B32_SSCALED,
         PIPE_FORMAT_R32G32B32A32_SSCALED
      },
      {
         PIPE_FORMAT_R32_SNORM,
         PIPE_FORMAT_R32G32_SNORM,
         PIPE_FORMAT_R32G32B32_SNORM,
         PIPE_FORMAT_R32G32B32A32_SNORM
      },
      {
         PIPE_FORMAT_R32_SINT,
         PIPE_FORMAT_R32G32_SINT,
         PIPE_FORMAT_R32G32B32_SINT,
         PIPE_FORMAT_R32G32B32A32_SINT
      },
   },
   { /* GL_UNSIGNED_INT */
      {
         PIPE_FORMAT_R32_USCALED,
         PIPE_FORMAT_R32G32_USCALED,
         PIPE_FORMAT_R32G32B32_USCALED,
         PIPE_FORMAT_R32G32B32A32_USCALED
      },
      {
         PIPE_FORMAT_R32_UNORM,
         PIPE_FORMAT_R32G32_UNORM,
         PIPE_FORMAT_R32G32B32_UNORM,
         PIPE_FORMAT_R32G32B32A32_UNORM
      },
      {
         PIPE_FORMAT_R32_UINT,
         PIPE_FORMAT_R32G32_UINT,
         PIPE_FORMAT_R32G32B32_UINT,
         PIPE_FORMAT_R32G32B32A32_UINT
      },
   },
   { /* GL_FLOAT */
      /* normalized has no effect on floats: same formats in both rows */
      {
         PIPE_FORMAT_R32_FLOAT,
         PIPE_FORMAT_R32G32_FLOAT,
         PIPE_FORMAT_R32G32B32_FLOAT,
         PIPE_FORMAT_R32G32B32A32_FLOAT
      },
      {
         PIPE_FORMAT_R32_FLOAT,
         PIPE_FORMAT_R32G32_FLOAT,
         PIPE_FORMAT_R32G32B32_FLOAT,
         PIPE_FORMAT_R32G32B32A32_FLOAT
      },
   },
   {{0}}, /* GL_2_BYTES */
   {{0}}, /* GL_3_BYTES */
   {{0}}, /* GL_4_BYTES */
   { /* GL_DOUBLE */
      {
         PIPE_FORMAT_R64_FLOAT,
         PIPE_FORMAT_R64G64_FLOAT,
         PIPE_FORMAT_R64G64B64_FLOAT,
         PIPE_FORMAT_R64G64B64A64_FLOAT
      },
      {
         PIPE_FORMAT_R64_FLOAT,
         PIPE_FORMAT_R64G64_FLOAT,
         PIPE_FORMAT_R64G64B64_FLOAT,
         PIPE_FORMAT_R64G64B64A64_FLOAT
      },
   },
   { /* GL_HALF_FLOAT */
      {
         PIPE_FORMAT_R16_FLOAT,
         PIPE_FORMAT_R16G16_FLOAT,
         PIPE_FORMAT_R16G16B16_FLOAT,
         PIPE_FORMAT_R16G16B16A16_FLOAT
      },
      {
         PIPE_FORMAT_R16_FLOAT,
         PIPE_FORMAT_R16G16_FLOAT,
         PIPE_FORMAT_R16G16B16_FLOAT,
         PIPE_FORMAT_R16G16B16A16_FLOAT
      },
   },
   { /* GL_FIXED */
      {
         PIPE_FORMAT_R32_FIXED,
         PIPE_FORMAT_R32G32_FIXED,
         PIPE_FORMAT_R32G32B32_FIXED,
         PIPE_FORMAT_R32G32B32A32_FIXED
      },
      {
         PIPE_FORMAT_R32_FIXED,
         PIPE_FORMAT_R32G32_FIXED,
         PIPE_FORMAT_R32G32B32_FIXED,
         PIPE_FORMAT_R32G32B32A32_FIXED
      },
   },
};
234
235
236 /**
237 * Return a PIPE_FORMAT_x for the given GL datatype and size.
238 */
239 enum pipe_format
240 st_pipe_vertex_format(GLenum type, GLuint size, GLenum format,
241 GLboolean normalized, GLboolean integer)
242 {
243 unsigned index;
244
245 assert(size >= 1 && size <= 4);
246 assert(format == GL_RGBA || format == GL_BGRA);
247
248 switch (type) {
249 case GL_HALF_FLOAT_OES:
250 type = GL_HALF_FLOAT;
251 break;
252
253 case GL_INT_2_10_10_10_REV:
254 assert(size == 4 && !integer);
255
256 if (format == GL_BGRA) {
257 if (normalized)
258 return PIPE_FORMAT_B10G10R10A2_SNORM;
259 else
260 return PIPE_FORMAT_B10G10R10A2_SSCALED;
261 } else {
262 if (normalized)
263 return PIPE_FORMAT_R10G10B10A2_SNORM;
264 else
265 return PIPE_FORMAT_R10G10B10A2_SSCALED;
266 }
267 break;
268
269 case GL_UNSIGNED_INT_2_10_10_10_REV:
270 assert(size == 4 && !integer);
271
272 if (format == GL_BGRA) {
273 if (normalized)
274 return PIPE_FORMAT_B10G10R10A2_UNORM;
275 else
276 return PIPE_FORMAT_B10G10R10A2_USCALED;
277 } else {
278 if (normalized)
279 return PIPE_FORMAT_R10G10B10A2_UNORM;
280 else
281 return PIPE_FORMAT_R10G10B10A2_USCALED;
282 }
283 break;
284
285 case GL_UNSIGNED_INT_10F_11F_11F_REV:
286 assert(size == 3 && !integer && format == GL_RGBA);
287 return PIPE_FORMAT_R11G11B10_FLOAT;
288
289 case GL_UNSIGNED_BYTE:
290 if (format == GL_BGRA) {
291 /* this is an odd-ball case */
292 assert(normalized);
293 return PIPE_FORMAT_B8G8R8A8_UNORM;
294 }
295 break;
296 }
297
298 index = integer*2 + normalized;
299 assert(index <= 2);
300 assert(type >= GL_BYTE && type <= GL_FIXED);
301 return vertex_formats[type - GL_BYTE][index][size-1];
302 }
303
304 static const struct gl_vertex_array *
305 get_client_array(const struct gl_vertex_array *arrays,
306 unsigned mesaAttr)
307 {
308 /* st_program uses 0xffffffff to denote a double placeholder attribute */
309 if (mesaAttr == ST_DOUBLE_ATTRIB_PLACEHOLDER)
310 return NULL;
311 return &arrays[mesaAttr];
312 }
313
314 /**
315 * Examine the active arrays to determine if we have interleaved
316 * vertex arrays all living in one VBO, or all living in user space.
317 */
318 static GLboolean
319 is_interleaved_arrays(const struct st_vertex_program *vp,
320 const struct gl_vertex_array *arrays,
321 unsigned num_inputs)
322 {
323 GLuint attr;
324 const struct gl_buffer_object *firstBufObj = NULL;
325 GLint firstStride = -1;
326 const GLubyte *firstPtr = NULL;
327 GLboolean userSpaceBuffer = GL_FALSE;
328
329 for (attr = 0; attr < num_inputs; attr++) {
330 const struct gl_vertex_array *array;
331 const struct gl_vertex_buffer_binding *binding;
332 const struct gl_array_attributes *attrib;
333 const GLubyte *ptr;
334 const struct gl_buffer_object *bufObj;
335 GLsizei stride;
336
337 array = get_client_array(arrays, vp->index_to_input[attr]);
338 if (!array)
339 continue;
340
341 binding = array->BufferBinding;
342 attrib = array->VertexAttrib;
343 stride = binding->Stride; /* in bytes */
344 ptr = _mesa_vertex_attrib_address(attrib, binding);
345
346 /* To keep things simple, don't allow interleaved zero-stride attribs. */
347 if (stride == 0)
348 return false;
349
350 bufObj = binding->BufferObj;
351 if (attr == 0) {
352 /* save info about the first array */
353 firstStride = stride;
354 firstPtr = ptr;
355 firstBufObj = bufObj;
356 userSpaceBuffer = !_mesa_is_bufferobj(bufObj);
357 }
358 else {
359 /* check if other arrays interleave with the first, in same buffer */
360 if (stride != firstStride)
361 return GL_FALSE; /* strides don't match */
362
363 if (bufObj != firstBufObj)
364 return GL_FALSE; /* arrays in different VBOs */
365
366 if (llabs(ptr - firstPtr) > firstStride)
367 return GL_FALSE; /* arrays start too far apart */
368
369 if ((!_mesa_is_bufferobj(bufObj)) != userSpaceBuffer)
370 return GL_FALSE; /* mix of VBO and user-space arrays */
371 }
372 }
373
374 return GL_TRUE;
375 }
376
377 static void init_velement(struct pipe_vertex_element *velement,
378 int src_offset, int format,
379 int instance_divisor, int vbo_index)
380 {
381 velement->src_offset = src_offset;
382 velement->src_format = format;
383 velement->instance_divisor = instance_divisor;
384 velement->vertex_buffer_index = vbo_index;
385 assert(velement->src_format);
386 }
387
388 static void init_velement_lowered(const struct st_vertex_program *vp,
389 struct pipe_vertex_element *velements,
390 int src_offset, int format,
391 int instance_divisor, int vbo_index,
392 int nr_components, GLboolean doubles,
393 GLuint *attr_idx)
394 {
395 int idx = *attr_idx;
396 if (doubles) {
397 int lower_format;
398
399 if (nr_components < 2)
400 lower_format = PIPE_FORMAT_R32G32_UINT;
401 else
402 lower_format = PIPE_FORMAT_R32G32B32A32_UINT;
403
404 init_velement(&velements[idx], src_offset,
405 lower_format, instance_divisor, vbo_index);
406 idx++;
407
408 if (idx < vp->num_inputs &&
409 vp->index_to_input[idx] == ST_DOUBLE_ATTRIB_PLACEHOLDER) {
410 if (nr_components >= 3) {
411 if (nr_components == 3)
412 lower_format = PIPE_FORMAT_R32G32_UINT;
413 else
414 lower_format = PIPE_FORMAT_R32G32B32A32_UINT;
415
416 init_velement(&velements[idx], src_offset + 4 * sizeof(float),
417 lower_format, instance_divisor, vbo_index);
418 } else {
419 /* The values here are undefined. Fill in some conservative
420 * dummy values.
421 */
422 init_velement(&velements[idx], src_offset, PIPE_FORMAT_R32G32_UINT,
423 instance_divisor, vbo_index);
424 }
425
426 idx++;
427 }
428 } else {
429 init_velement(&velements[idx], src_offset,
430 format, instance_divisor, vbo_index);
431 idx++;
432 }
433 *attr_idx = idx;
434 }
435
436 static void
437 set_vertex_attribs(struct st_context *st,
438 struct pipe_vertex_buffer *vbuffers,
439 unsigned num_vbuffers,
440 struct pipe_vertex_element *velements,
441 unsigned num_velements)
442 {
443 struct cso_context *cso = st->cso_context;
444
445 cso_set_vertex_buffers(cso, 0, num_vbuffers, vbuffers);
446 if (st->last_num_vbuffers > num_vbuffers) {
447 /* Unbind remaining buffers, if any. */
448 cso_set_vertex_buffers(cso, num_vbuffers,
449 st->last_num_vbuffers - num_vbuffers, NULL);
450 }
451 st->last_num_vbuffers = num_vbuffers;
452 cso_set_vertex_elements(cso, num_velements, velements);
453 }
454
/**
 * Set up for drawing interleaved arrays that all live in one VBO
 * or all live in user space.
 *
 * Builds a single pipe_vertex_buffer covering all attributes (offsets are
 * expressed relative to the lowest array address) plus one vertex element
 * per shader input, then binds both via set_vertex_attribs().  On an
 * out-of-memory VBO, sets st->vertex_array_out_of_memory and returns
 * without binding anything.
 */
static void
setup_interleaved_attribs(struct st_context *st,
                          const struct st_vertex_program *vp,
                          const struct gl_vertex_array *arrays,
                          unsigned num_inputs)
{
   struct pipe_vertex_buffer vbuffer;
   struct pipe_vertex_element velements[PIPE_MAX_ATTRIBS] = {{0}};
   GLuint attr;
   const GLubyte *low_addr = NULL;
   GLboolean usingVBO;      /* all arrays in a VBO? */
   struct gl_buffer_object *bufobj;
   GLsizei stride;

   /* Find the lowest address of the arrays we're drawing,
    * Init bufobj and stride.
    */
   if (num_inputs) {
      const struct gl_vertex_array *array;
      const struct gl_vertex_buffer_binding *binding;
      const struct gl_array_attributes *attrib;

      array = get_client_array(arrays, vp->index_to_input[0]);
      assert(array);

      binding = array->BufferBinding;
      attrib = array->VertexAttrib;

      /* Since we're doing interleaved arrays, we know there'll be at most
       * one buffer object and the stride will be the same for all arrays.
       * Grab them now.
       */
      bufobj = binding->BufferObj;
      stride = binding->Stride;

      low_addr = _mesa_vertex_attrib_address(attrib, binding);

      /* Scan the remaining inputs for an even lower start address. */
      for (attr = 1; attr < num_inputs; attr++) {
         const GLubyte *start;
         array = get_client_array(arrays, vp->index_to_input[attr]);
         if (!array)
            continue;   /* double-attribute placeholder slot */
         binding = array->BufferBinding;
         attrib = array->VertexAttrib;
         start = _mesa_vertex_attrib_address(attrib, binding);
         low_addr = MIN2(low_addr, start);
      }
   }
   else {
      /* not sure we'll ever have zero inputs, but play it safe */
      bufobj = NULL;
      stride = 0;
      low_addr = 0;
   }

   /* true when the arrays live in a buffer object (not user memory) */
   usingVBO = _mesa_is_bufferobj(bufobj);

   /* Build one vertex element per input; init_velement_lowered advances
    * attr past any placeholder slot consumed by a double attribute.
    */
   for (attr = 0; attr < num_inputs;) {
      const struct gl_vertex_array *array;
      const struct gl_vertex_buffer_binding *binding;
      const struct gl_array_attributes *attrib;
      const GLubyte *ptr;
      unsigned src_offset;
      unsigned src_format;

      array = get_client_array(arrays, vp->index_to_input[attr]);
      assert(array);

      binding = array->BufferBinding;
      attrib = array->VertexAttrib;
      ptr = _mesa_vertex_attrib_address(attrib, binding);

      /* element offset relative to the shared buffer start */
      src_offset = (unsigned) (ptr - low_addr);
      assert(attrib->_ElementSize ==
             _mesa_bytes_per_vertex_attrib(attrib->Size, attrib->Type));

      src_format = st_pipe_vertex_format(attrib->Type,
                                         attrib->Size,
                                         attrib->Format,
                                         attrib->Normalized,
                                         attrib->Integer);

      init_velement_lowered(vp, velements, src_offset, src_format,
                            binding->InstanceDivisor, 0,
                            attrib->Size, attrib->Doubles, &attr);
   }

   /*
    * Return the vbuffer info and setup user-space attrib info, if needed.
    */
   if (num_inputs == 0) {
      /* just defensive coding here */
      vbuffer.buffer.resource = NULL;
      vbuffer.is_user_buffer = false;
      vbuffer.buffer_offset = 0;
      vbuffer.stride = 0;
   }
   else if (usingVBO) {
      /* all interleaved arrays in a VBO */
      struct st_buffer_object *stobj = st_buffer_object(bufobj);

      if (!stobj || !stobj->buffer) {
         st->vertex_array_out_of_memory = true;
         return; /* out-of-memory error probably */
      }

      vbuffer.buffer.resource = stobj->buffer;
      vbuffer.is_user_buffer = false;
      /* for VBOs, low_addr is really an offset into the buffer */
      vbuffer.buffer_offset = pointer_to_offset(low_addr);
      vbuffer.stride = stride;
   }
   else {
      /* all interleaved arrays in user memory */
      vbuffer.buffer.user = low_addr;
      vbuffer.is_user_buffer = !!low_addr; /* if NULL, then unbind */
      vbuffer.buffer_offset = 0;
      vbuffer.stride = stride;

      /* user buffers require index bounds so the draw module can upload */
      if (low_addr)
         st->draw_needs_minmax_index = true;
   }

   set_vertex_attribs(st, &vbuffer, num_inputs ? 1 : 0,
                      velements, num_inputs);
}
587
588 /**
589 * Set up a separate pipe_vertex_buffer and pipe_vertex_element for each
590 * vertex attribute.
591 * \param vbuffer returns vertex buffer info
592 * \param velements returns vertex element info
593 */
594 static void
595 setup_non_interleaved_attribs(struct st_context *st,
596 const struct st_vertex_program *vp,
597 const struct gl_vertex_array *arrays,
598 unsigned num_inputs)
599 {
600 struct gl_context *ctx = st->ctx;
601 struct pipe_vertex_buffer vbuffer[PIPE_MAX_ATTRIBS];
602 struct pipe_vertex_element velements[PIPE_MAX_ATTRIBS] = {{0}};
603 unsigned num_vbuffers = 0;
604 unsigned unref_buffers = 0;
605 GLuint attr;
606
607 for (attr = 0; attr < num_inputs;) {
608 const unsigned mesaAttr = vp->index_to_input[attr];
609 const struct gl_vertex_array *array;
610 const struct gl_vertex_buffer_binding *binding;
611 const struct gl_array_attributes *attrib;
612 struct gl_buffer_object *bufobj;
613 GLsizei stride;
614 unsigned src_format;
615 unsigned bufidx;
616
617 array = get_client_array(arrays, mesaAttr);
618 assert(array);
619
620 bufidx = num_vbuffers++;
621
622 binding = array->BufferBinding;
623 attrib = array->VertexAttrib;
624 stride = binding->Stride;
625 bufobj = binding->BufferObj;
626 assert(attrib->_ElementSize ==
627 _mesa_bytes_per_vertex_attrib(attrib->Size, attrib->Type));
628
629 if (_mesa_is_bufferobj(bufobj)) {
630 /* Attribute data is in a VBO.
631 * Recall that for VBOs, the gl_vertex_array->Ptr field is
632 * really an offset from the start of the VBO, not a pointer.
633 */
634 struct st_buffer_object *stobj = st_buffer_object(bufobj);
635
636 if (!stobj || !stobj->buffer) {
637 st->vertex_array_out_of_memory = true;
638 return; /* out-of-memory error probably */
639 }
640
641 vbuffer[bufidx].buffer.resource = stobj->buffer;
642 vbuffer[bufidx].is_user_buffer = false;
643 vbuffer[bufidx].buffer_offset =
644 binding->Offset + attrib->RelativeOffset;
645 }
646 else {
647 if (stride == 0) {
648 unsigned size = attrib->_ElementSize;
649 /* This is optimal for GPU cache line usage if the upload size
650 * is <= cache line size.
651 */
652 unsigned alignment = util_next_power_of_two(size);
653
654 assert(attrib->Ptr);
655 vbuffer[bufidx].buffer.user = attrib->Ptr;
656 void *ptr = attrib->Ptr ? (void*)attrib->Ptr :
657 (void*)ctx->Current.Attrib[mesaAttr];
658
659 vbuffer[bufidx].is_user_buffer = false;
660 vbuffer[bufidx].buffer.resource = NULL;
661
662 /* Use const_uploader for zero-stride vertex attributes, because
663 * it may use a better memory placement than stream_uploader.
664 * The reason is that zero-stride attributes can be fetched many
665 * times (thousands of times), so a better placement is going to
666 * perform better.
667 *
668 * Upload the maximum possible size, which is 4x GLdouble = 32.
669 */
670 u_upload_data(st->can_bind_const_buffer_as_vertex ?
671 st->pipe->const_uploader :
672 st->pipe->stream_uploader,
673 0, size, alignment, ptr,
674 &vbuffer[bufidx].buffer_offset,
675 &vbuffer[bufidx].buffer.resource);
676 unref_buffers |= 1u << bufidx;
677 } else {
678 assert(attrib->Ptr);
679 vbuffer[bufidx].buffer.user = attrib->Ptr;
680 vbuffer[bufidx].is_user_buffer = true;
681 vbuffer[bufidx].buffer_offset = 0;
682
683 if (!binding->InstanceDivisor)
684 st->draw_needs_minmax_index = true;
685 }
686 }
687
688 /* common-case setup */
689 vbuffer[bufidx].stride = stride; /* in bytes */
690
691 src_format = st_pipe_vertex_format(attrib->Type,
692 attrib->Size,
693 attrib->Format,
694 attrib->Normalized,
695 attrib->Integer);
696
697 init_velement_lowered(vp, velements, 0, src_format,
698 binding->InstanceDivisor, bufidx,
699 attrib->Size, attrib->Doubles, &attr);
700 }
701
702 if (!ctx->Const.AllowMappedBuffersDuringExecution) {
703 u_upload_unmap(st->pipe->stream_uploader);
704 }
705
706 set_vertex_attribs(st, vbuffer, num_vbuffers, velements, num_inputs);
707
708 /* Unreference uploaded zero-stride vertex buffers. */
709 while (unref_buffers) {
710 unsigned i = u_bit_scan(&unref_buffers);
711 pipe_resource_reference(&vbuffer[i].buffer.resource, NULL);
712 }
713 }
714
715 void st_update_array(struct st_context *st)
716 {
717 struct gl_context *ctx = st->ctx;
718 const struct gl_vertex_array *arrays = ctx->Array._DrawArrays;
719 const struct st_vertex_program *vp;
720 unsigned num_inputs;
721
722 st->vertex_array_out_of_memory = FALSE;
723 st->draw_needs_minmax_index = false;
724
725 /* No drawing has been done yet, so do nothing. */
726 if (!arrays)
727 return;
728
729 /* vertex program validation must be done before this */
730 vp = st->vp;
731 num_inputs = st->vp_variant->num_inputs;
732
733 if (is_interleaved_arrays(vp, arrays, num_inputs))
734 setup_interleaved_attribs(st, vp, arrays, num_inputs);
735 else
736 setup_non_interleaved_attribs(st, vp, arrays, num_inputs);
737 }