/*
 * Copyright 2003 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

26 #include "main/bufferobj.h"
27 #include "main/context.h"
28 #include "main/enums.h"
29 #include "main/macros.h"
30 #include "main/glformats.h"
33 #include "brw_defines.h"
34 #include "brw_context.h"
35 #include "brw_state.h"
37 #include "intel_batchbuffer.h"
38 #include "intel_buffer_objects.h"
static const GLuint double_types_float[5] = {
   0,
   ISL_FORMAT_R64_FLOAT,
   ISL_FORMAT_R64G64_FLOAT,
   ISL_FORMAT_R64G64B64_FLOAT,
   ISL_FORMAT_R64G64B64A64_FLOAT
};

static const GLuint double_types_passthru[5] = {
   0,
   ISL_FORMAT_R64_PASSTHRU,
   ISL_FORMAT_R64G64_PASSTHRU,
   ISL_FORMAT_R64G64B64_PASSTHRU,
   ISL_FORMAT_R64G64B64A64_PASSTHRU
};

static const GLuint float_types[5] = {
   0,
   ISL_FORMAT_R32_FLOAT,
   ISL_FORMAT_R32G32_FLOAT,
   ISL_FORMAT_R32G32B32_FLOAT,
   ISL_FORMAT_R32G32B32A32_FLOAT
};

static const GLuint half_float_types[5] = {
   0,
   ISL_FORMAT_R16_FLOAT,
   ISL_FORMAT_R16G16_FLOAT,
   ISL_FORMAT_R16G16B16_FLOAT,
   ISL_FORMAT_R16G16B16A16_FLOAT
};

static const GLuint fixed_point_types[5] = {
   0,
   ISL_FORMAT_R32_SFIXED,
   ISL_FORMAT_R32G32_SFIXED,
   ISL_FORMAT_R32G32B32_SFIXED,
   ISL_FORMAT_R32G32B32A32_SFIXED,
};

static const GLuint uint_types_direct[5] = {
   0,
   ISL_FORMAT_R32_UINT,
   ISL_FORMAT_R32G32_UINT,
   ISL_FORMAT_R32G32B32_UINT,
   ISL_FORMAT_R32G32B32A32_UINT
};

static const GLuint uint_types_norm[5] = {
   0,
   ISL_FORMAT_R32_UNORM,
   ISL_FORMAT_R32G32_UNORM,
   ISL_FORMAT_R32G32B32_UNORM,
   ISL_FORMAT_R32G32B32A32_UNORM
};

static const GLuint uint_types_scale[5] = {
   0,
   ISL_FORMAT_R32_USCALED,
   ISL_FORMAT_R32G32_USCALED,
   ISL_FORMAT_R32G32B32_USCALED,
   ISL_FORMAT_R32G32B32A32_USCALED
};

static const GLuint int_types_direct[5] = {
   0,
   ISL_FORMAT_R32_SINT,
   ISL_FORMAT_R32G32_SINT,
   ISL_FORMAT_R32G32B32_SINT,
   ISL_FORMAT_R32G32B32A32_SINT
};

static const GLuint int_types_norm[5] = {
   0,
   ISL_FORMAT_R32_SNORM,
   ISL_FORMAT_R32G32_SNORM,
   ISL_FORMAT_R32G32B32_SNORM,
   ISL_FORMAT_R32G32B32A32_SNORM
};

static const GLuint int_types_scale[5] = {
   0,
   ISL_FORMAT_R32_SSCALED,
   ISL_FORMAT_R32G32_SSCALED,
   ISL_FORMAT_R32G32B32_SSCALED,
   ISL_FORMAT_R32G32B32A32_SSCALED
};

static const GLuint ushort_types_direct[5] = {
   0,
   ISL_FORMAT_R16_UINT,
   ISL_FORMAT_R16G16_UINT,
   ISL_FORMAT_R16G16B16_UINT,
   ISL_FORMAT_R16G16B16A16_UINT
};

static const GLuint ushort_types_norm[5] = {
   0,
   ISL_FORMAT_R16_UNORM,
   ISL_FORMAT_R16G16_UNORM,
   ISL_FORMAT_R16G16B16_UNORM,
   ISL_FORMAT_R16G16B16A16_UNORM
};

static const GLuint ushort_types_scale[5] = {
   0,
   ISL_FORMAT_R16_USCALED,
   ISL_FORMAT_R16G16_USCALED,
   ISL_FORMAT_R16G16B16_USCALED,
   ISL_FORMAT_R16G16B16A16_USCALED
};

static const GLuint short_types_direct[5] = {
   0,
   ISL_FORMAT_R16_SINT,
   ISL_FORMAT_R16G16_SINT,
   ISL_FORMAT_R16G16B16_SINT,
   ISL_FORMAT_R16G16B16A16_SINT
};

static const GLuint short_types_norm[5] = {
   0,
   ISL_FORMAT_R16_SNORM,
   ISL_FORMAT_R16G16_SNORM,
   ISL_FORMAT_R16G16B16_SNORM,
   ISL_FORMAT_R16G16B16A16_SNORM
};

static const GLuint short_types_scale[5] = {
   0,
   ISL_FORMAT_R16_SSCALED,
   ISL_FORMAT_R16G16_SSCALED,
   ISL_FORMAT_R16G16B16_SSCALED,
   ISL_FORMAT_R16G16B16A16_SSCALED
};

static const GLuint ubyte_types_direct[5] = {
   0,
   ISL_FORMAT_R8_UINT,
   ISL_FORMAT_R8G8_UINT,
   ISL_FORMAT_R8G8B8_UINT,
   ISL_FORMAT_R8G8B8A8_UINT
};

static const GLuint ubyte_types_norm[5] = {
   0,
   ISL_FORMAT_R8_UNORM,
   ISL_FORMAT_R8G8_UNORM,
   ISL_FORMAT_R8G8B8_UNORM,
   ISL_FORMAT_R8G8B8A8_UNORM
};

static const GLuint ubyte_types_scale[5] = {
   0,
   ISL_FORMAT_R8_USCALED,
   ISL_FORMAT_R8G8_USCALED,
   ISL_FORMAT_R8G8B8_USCALED,
   ISL_FORMAT_R8G8B8A8_USCALED
};

static const GLuint byte_types_direct[5] = {
   0,
   ISL_FORMAT_R8_SINT,
   ISL_FORMAT_R8G8_SINT,
   ISL_FORMAT_R8G8B8_SINT,
   ISL_FORMAT_R8G8B8A8_SINT
};

static const GLuint byte_types_norm[5] = {
   0,
   ISL_FORMAT_R8_SNORM,
   ISL_FORMAT_R8G8_SNORM,
   ISL_FORMAT_R8G8B8_SNORM,
   ISL_FORMAT_R8G8B8A8_SNORM
};

static const GLuint byte_types_scale[5] = {
   0,
   ISL_FORMAT_R8_SSCALED,
   ISL_FORMAT_R8G8_SSCALED,
   ISL_FORMAT_R8G8B8_SSCALED,
   ISL_FORMAT_R8G8B8A8_SSCALED
};

static GLuint
double_types(struct brw_context *brw,
             int size,
             GLboolean doubles)
{
   /* From the BDW PRM, Volume 2d, page 588 (VERTEX_ELEMENT_STATE):
    * "When SourceElementFormat is set to one of the *64*_PASSTHRU formats,
    * 64-bit components are stored in the URB without any conversion."
    * Also included on BDW PRM, Volume 7, page 470, table "Source Element
    * Formats Supported in VF Unit"
    *
    * Previous PRMs don't include those references, so for gen7 we can't use
    * PASSTHRU formats directly. But in any case, we prefer to return
    * passthru even in that case, because that reflects what we want to
    * achieve, even if we would need to work around it on gen < 8.
    */
   return (doubles
           ? double_types_passthru[size]
           : double_types_float[size]);
}

/**
 * Given vertex array type/size/format/normalized info, return
 * the appropriate hardware surface type.
 * Format will be GL_RGBA or possibly GL_BGRA for GLubyte[4] color arrays.
 */
unsigned
brw_get_vertex_surface_type(struct brw_context *brw,
                            const struct gl_vertex_array *glarray)
{
   int size = glarray->Size;
   const bool is_ivybridge_or_older =
      brw->gen <= 7 && !brw->is_baytrail && !brw->is_haswell;

   if (unlikely(INTEL_DEBUG & DEBUG_VERTS))
      fprintf(stderr, "type %s size %d normalized %d\n",
              _mesa_enum_to_string(glarray->Type),
              glarray->Size, glarray->Normalized);
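
   /* Note: the size == 3 special cases below exist because Ivybridge and
    * older lack VF support for the 3-component 8- and 16-bit integer vertex
    * formats, so we fetch the padded 4-component format instead and let the
    * extra component be ignored.
    */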
   if (glarray->Integer) {
      assert(glarray->Format == GL_RGBA); /* sanity check */
      switch (glarray->Type) {
      case GL_INT: return int_types_direct[size];
      case GL_SHORT:
         if (is_ivybridge_or_older && size == 3)
            return short_types_direct[4];
         else
            return short_types_direct[size];
      case GL_BYTE:
         if (is_ivybridge_or_older && size == 3)
            return byte_types_direct[4];
         else
            return byte_types_direct[size];
      case GL_UNSIGNED_INT: return uint_types_direct[size];
      case GL_UNSIGNED_SHORT:
         if (is_ivybridge_or_older && size == 3)
            return ushort_types_direct[4];
         else
            return ushort_types_direct[size];
      case GL_UNSIGNED_BYTE:
         if (is_ivybridge_or_older && size == 3)
            return ubyte_types_direct[4];
         else
            return ubyte_types_direct[size];
      default: unreachable("not reached");
      }
   } else if (glarray->Type == GL_UNSIGNED_INT_10F_11F_11F_REV) {
      return ISL_FORMAT_R11G11B10_FLOAT;
   } else if (glarray->Normalized) {
      switch (glarray->Type) {
      case GL_DOUBLE: return double_types(brw, size, glarray->Doubles);
      case GL_FLOAT: return float_types[size];
      case GL_HALF_FLOAT:
      case GL_HALF_FLOAT_OES:
         if (brw->gen < 6 && size == 3)
            return half_float_types[4];
         else
            return half_float_types[size];
      case GL_INT: return int_types_norm[size];
      case GL_SHORT: return short_types_norm[size];
      case GL_BYTE: return byte_types_norm[size];
      case GL_UNSIGNED_INT: return uint_types_norm[size];
      case GL_UNSIGNED_SHORT: return ushort_types_norm[size];
      case GL_UNSIGNED_BYTE:
         if (glarray->Format == GL_BGRA) {
            /* See GL_EXT_vertex_array_bgra */
            return ISL_FORMAT_B8G8R8A8_UNORM;
         }
         else {
            return ubyte_types_norm[size];
         }
      case GL_FIXED:
         if (brw->gen >= 8 || brw->is_haswell)
            return fixed_point_types[size];

         /* This produces GL_FIXED inputs as values between INT32_MIN and
          * INT32_MAX, which will be scaled down by 1/65536 by the VS.
          */
         return int_types_scale[size];
      /* See GL_ARB_vertex_type_2_10_10_10_rev.
       * W/A: Pre-Haswell, the hardware doesn't really support the formats
       * we'd like to use here, so upload everything as UINT and fix it in
       * the shader.
       */
      case GL_INT_2_10_10_10_REV:
         if (brw->gen >= 8 || brw->is_haswell) {
            return glarray->Format == GL_BGRA
               ? ISL_FORMAT_B10G10R10A2_SNORM
               : ISL_FORMAT_R10G10B10A2_SNORM;
         }
         return ISL_FORMAT_R10G10B10A2_UINT;
      case GL_UNSIGNED_INT_2_10_10_10_REV:
         if (brw->gen >= 8 || brw->is_haswell) {
            return glarray->Format == GL_BGRA
               ? ISL_FORMAT_B10G10R10A2_UNORM
               : ISL_FORMAT_R10G10B10A2_UNORM;
         }
         return ISL_FORMAT_R10G10B10A2_UINT;
      default: unreachable("not reached");
      }
   } else {
      /* See GL_ARB_vertex_type_2_10_10_10_rev.
       * W/A: the hardware doesn't really support the formats we'd like to
       * use here, so upload everything as UINT and fix it in the shader.
       */
      if (glarray->Type == GL_INT_2_10_10_10_REV) {
         if (brw->gen >= 8 || brw->is_haswell) {
            return glarray->Format == GL_BGRA
               ? ISL_FORMAT_B10G10R10A2_SSCALED
               : ISL_FORMAT_R10G10B10A2_SSCALED;
         }
         return ISL_FORMAT_R10G10B10A2_UINT;
      } else if (glarray->Type == GL_UNSIGNED_INT_2_10_10_10_REV) {
         if (brw->gen >= 8 || brw->is_haswell) {
            return glarray->Format == GL_BGRA
               ? ISL_FORMAT_B10G10R10A2_USCALED
               : ISL_FORMAT_R10G10B10A2_USCALED;
         }
         return ISL_FORMAT_R10G10B10A2_UINT;
      }
      assert(glarray->Format == GL_RGBA); /* sanity check */
      switch (glarray->Type) {
      case GL_DOUBLE: return double_types(brw, size, glarray->Doubles);
      case GL_FLOAT: return float_types[size];
      case GL_HALF_FLOAT:
      case GL_HALF_FLOAT_OES:
         if (brw->gen < 6 && size == 3)
            return half_float_types[4];
         else
            return half_float_types[size];
      case GL_INT: return int_types_scale[size];
      case GL_SHORT: return short_types_scale[size];
      case GL_BYTE: return byte_types_scale[size];
      case GL_UNSIGNED_INT: return uint_types_scale[size];
      case GL_UNSIGNED_SHORT: return ushort_types_scale[size];
      case GL_UNSIGNED_BYTE: return ubyte_types_scale[size];
      case GL_FIXED:
         if (brw->gen >= 8 || brw->is_haswell)
            return fixed_point_types[size];

         /* This produces GL_FIXED inputs as values between INT32_MIN and
          * INT32_MAX, which will be scaled down by 1/65536 by the VS.
          */
         return int_types_scale[size];
      default: unreachable("not reached");
      }
   }
}

static void
copy_array_to_vbo_array(struct brw_context *brw,
                        struct brw_vertex_element *element,
                        int min, int max,
                        struct brw_vertex_buffer *buffer,
                        GLuint dst_stride)
{
   const int src_stride = element->glarray->StrideB;

   /* If the source stride is zero, we just want to upload the current
    * attribute once and set the buffer's stride to 0.  There's no need
    * to replicate it out.
    */
   if (src_stride == 0) {
      intel_upload_data(brw, element->glarray->Ptr,
                        element->glarray->_ElementSize,
                        element->glarray->_ElementSize,
                        &buffer->bo, &buffer->offset);

      buffer->stride = 0;
      buffer->size = element->glarray->_ElementSize;
      return;
   }
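
   /* Otherwise, upload elements [min, max].  When the destination stride
    * differs from the source stride (e.g. when compacting a loosely packed
    * source down to the element size), the data is copied element by
    * element below.
    */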
   const unsigned char *src = element->glarray->Ptr + min * src_stride;
   int count = max - min + 1;
   GLuint size = count * dst_stride;
   uint8_t *dst = intel_upload_space(brw, size, dst_stride,
                                     &buffer->bo, &buffer->offset);

   /* The GL 4.5 spec says:
    *      "If any enabled array's buffer binding is zero when DrawArrays or
    *      one of the other drawing commands defined in section 10.4 is
    *      called, the result is undefined."
    *
    * In this case, just leave dst filled with undefined values.
    */
   if (src != NULL) {
      if (dst_stride == src_stride) {
         memcpy(dst, src, size);
      } else {
         while (count--) {
            memcpy(dst, src, dst_stride);
            src += src_stride;
            dst += dst_stride;
         }
      }
   }
   buffer->stride = dst_stride;
   buffer->size = size;
}
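
/**
 * Gather the enabled vertex arrays for the current VS, decide which can be
 * referenced in place and which must be copied into temporary upload
 * buffers, and fill in brw->vb accordingly.
 */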
void
brw_prepare_vertices(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   /* BRW_NEW_VS_PROG_DATA */
   const struct brw_vs_prog_data *vs_prog_data =
      brw_vs_prog_data(brw->vs.base.prog_data);
   GLbitfield64 vs_inputs = vs_prog_data->inputs_read;
   const unsigned char *ptr = NULL;
   GLuint interleaved = 0;
   unsigned int min_index = brw->vb.min_index + brw->basevertex;
   unsigned int max_index = brw->vb.max_index + brw->basevertex;
   unsigned i;
   int delta, j;

   struct brw_vertex_element *upload[VERT_ATTRIB_MAX];
   GLuint nr_uploads = 0;

   /* _NEW_POLYGON
    *
    * On gen6+, edge flags don't end up in the VUE (either in or out of the
    * VS).  Instead, they're uploaded as the last vertex element, and the
    * data is passed sideband through the fixed function units.  So, we need
    * to prepare the vertex buffer for it, but it's not present in
    * inputs_read.
    */
   if (brw->gen >= 6 && (ctx->Polygon.FrontMode != GL_FILL ||
                         ctx->Polygon.BackMode != GL_FILL)) {
      vs_inputs |= VERT_BIT_EDGEFLAG;
   }

   if (unlikely(INTEL_DEBUG & DEBUG_VERTS))
      fprintf(stderr, "%s %d..%d\n", __func__, min_index, max_index);

   /* Accumulate the list of enabled arrays. */
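   /* Note: inputs_read bits and brw_vertex_element slots diverge once
    * 64-bit attributes are in play: a dual-slot double input occupies two
    * bits in inputs_read, so the element index is computed by subtracting
    * the extra slots consumed by earlier double inputs.
    */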
   brw->vb.nr_enabled = 0;
   while (vs_inputs) {
      GLuint first = ffsll(vs_inputs) - 1;
      GLuint index =
         first - DIV_ROUND_UP(_mesa_bitcount_64(vs_prog_data->double_inputs_read &
                                                BITFIELD64_MASK(first)), 2);
      struct brw_vertex_element *input = &brw->vb.inputs[index];
      input->is_dual_slot =
         (vs_prog_data->double_inputs_read & BITFIELD64_BIT(first)) != 0;
      vs_inputs &= ~BITFIELD64_BIT(first);
      if (input->is_dual_slot)
         vs_inputs &= ~BITFIELD64_BIT(first + 1);
      brw->vb.enabled[brw->vb.nr_enabled++] = input;
   }
   if (brw->vb.nr_enabled == 0)
      return;

   if (brw->vb.nr_buffers)
      return;

   /* The range of data in a given buffer represented as [min, max) */
   struct intel_buffer_object *enabled_buffer[VERT_ATTRIB_MAX];
   uint32_t buffer_range_start[VERT_ATTRIB_MAX];
   uint32_t buffer_range_end[VERT_ATTRIB_MAX];
   for (i = j = 0; i < brw->vb.nr_enabled; i++) {
      struct brw_vertex_element *input = brw->vb.enabled[i];
      const struct gl_vertex_array *glarray = input->glarray;

      if (_mesa_is_bufferobj(glarray->BufferObj)) {
         struct intel_buffer_object *intel_buffer =
            intel_buffer_object(glarray->BufferObj);

         const uint32_t offset = (uintptr_t)glarray->Ptr;

         /* Start with the worst case */
         uint32_t start = 0;
         uint32_t range = intel_buffer->Base.Size;
         if (glarray->InstanceDivisor) {
            if (brw->num_instances) {
               start = offset + glarray->StrideB * brw->baseinstance;
               range = (glarray->StrideB * ((brw->num_instances - 1) /
                                            glarray->InstanceDivisor) +
                        glarray->_ElementSize);
            }
         } else {
            if (brw->vb.index_bounds_valid) {
               start = offset + min_index * glarray->StrideB;
               range = (glarray->StrideB * (max_index - min_index) +
                        glarray->_ElementSize);
            }
         }

         /* If we have a VB set to be uploaded for this buffer object
          * already, reuse that VB state so that we emit fewer
          * relocations.
          */
         unsigned k;
         for (k = 0; k < i; k++) {
            const struct gl_vertex_array *other = brw->vb.enabled[k]->glarray;
            if (glarray->BufferObj == other->BufferObj &&
                glarray->StrideB == other->StrideB &&
                glarray->InstanceDivisor == other->InstanceDivisor &&
                (uintptr_t)(glarray->Ptr - other->Ptr) < glarray->StrideB)
            {
               input->buffer = brw->vb.enabled[k]->buffer;
               input->offset = glarray->Ptr - other->Ptr;

               buffer_range_start[input->buffer] =
                  MIN2(buffer_range_start[input->buffer], start);
               buffer_range_end[input->buffer] =
                  MAX2(buffer_range_end[input->buffer], start + range);
               break;
            }
         }
         if (k == i) {
            struct brw_vertex_buffer *buffer = &brw->vb.buffers[j];

            /* Named buffer object: Just reference its contents directly. */
            buffer->offset = offset;
            buffer->stride = glarray->StrideB;
            buffer->step_rate = glarray->InstanceDivisor;
            buffer->size = glarray->BufferObj->Size - offset;

            enabled_buffer[j] = intel_buffer;
            buffer_range_start[j] = start;
            buffer_range_end[j] = start + range;

            input->buffer = j++;
            input->offset = 0;
         }
      } else {
         /* Queue the buffer object up to be uploaded in the next pass,
          * when we've decided if we're doing interleaved or not.
          */
         if (nr_uploads == 0) {
            interleaved = glarray->StrideB;
            ptr = glarray->Ptr;
         }
         else if (interleaved != glarray->StrideB ||
                  glarray->InstanceDivisor != 0 ||
                  glarray->Ptr < ptr ||
                  (uintptr_t)(glarray->Ptr - ptr) + glarray->_ElementSize > interleaved)
         {
            /* If our stride is different from the first attribute's stride,
             * or if we are using an instance divisor or if the first
             * attribute's stride didn't cover our element, disable the
             * interleaved upload optimization.  The second case can most
             * commonly occur in cases where there is a single vertex and,
             * for example, the data is stored on the application's stack.
             *
             * NOTE: This will also disable the optimization in cases where
             * the data is in a different order than the array indices.
             * Something like:
             *
             *     float data[...];
             *     glVertexAttribPointer(0, 4, GL_FLOAT, 32, &data[4]);
             *     glVertexAttribPointer(1, 4, GL_FLOAT, 32, &data[0]);
             */
            interleaved = 0;
         }

         upload[nr_uploads++] = input;
      }
   }

   /* Now that we've set up all of the buffers, we walk through and reference
    * each of them.  We do this late so that we get the right size in each
    * buffer and don't reference too little data.
    */
   for (i = 0; i < j; i++) {
      struct brw_vertex_buffer *buffer = &brw->vb.buffers[i];

      const uint32_t start = buffer_range_start[i];
      const uint32_t range = buffer_range_end[i] - buffer_range_start[i];

      buffer->bo = intel_bufferobj_buffer(brw, enabled_buffer[i], start, range);
      brw_bo_reference(buffer->bo);
   }

   /* If we need to upload all the arrays, then we can trim those arrays to
    * only the used elements [min_index, max_index] so long as we adjust all
    * the values used in the 3DPRIMITIVE i.e. by setting the vertex bias.
    */
   brw->vb.start_vertex_bias = 0;
   delta = min_index;
   if (nr_uploads == brw->vb.nr_enabled) {
      brw->vb.start_vertex_bias = -delta;
      delta = 0;
   }
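
   /* For example, with min_index == 100 the uploaded copy begins at element
    * 100, and a start_vertex_bias of -100 shifts the 3DPRIMITIVE start
    * vertex so the draw still addresses the right data.
    */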

   /* Handle any arrays to be uploaded. */
   if (nr_uploads > 1) {
      if (interleaved) {
         struct brw_vertex_buffer *buffer = &brw->vb.buffers[j];
         /* All uploads are interleaved, so upload the arrays together as
          * interleaved.  First, upload the contents and set up upload[0].
          */
         copy_array_to_vbo_array(brw, upload[0], min_index, max_index,
                                 buffer, interleaved);
         buffer->offset -= delta * interleaved;
         buffer->size += delta * interleaved;

         for (i = 0; i < nr_uploads; i++) {
            /* Then, just point upload[i] at upload[0]'s buffer. */
            upload[i]->offset =
               ((const unsigned char *)upload[i]->glarray->Ptr - ptr);
            upload[i]->buffer = j;
         }
         j++;

         nr_uploads = 0;
      }
   }

   /* Upload non-interleaved arrays */
   for (i = 0; i < nr_uploads; i++) {
      struct brw_vertex_buffer *buffer = &brw->vb.buffers[j];
      if (upload[i]->glarray->InstanceDivisor == 0) {
         copy_array_to_vbo_array(brw, upload[i], min_index, max_index,
                                 buffer, upload[i]->glarray->_ElementSize);
      } else {
         /* This is an instanced attribute, since its InstanceDivisor
          * is not zero.  Therefore, its data will be stepped after the
          * instanced draw has been run InstanceDivisor times.
          */
         uint32_t instanced_attr_max_index =
            (brw->num_instances - 1) / upload[i]->glarray->InstanceDivisor;
         copy_array_to_vbo_array(brw, upload[i], 0, instanced_attr_max_index,
                                 buffer, upload[i]->glarray->_ElementSize);
      }
      buffer->offset -= delta * buffer->stride;
      buffer->size += delta * buffer->stride;
      buffer->step_rate = upload[i]->glarray->InstanceDivisor;
      upload[i]->buffer = j++;
      upload[i]->offset = 0;
   }

   brw->vb.nr_buffers = j;
}
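
/**
 * Upload the per-draw parameters used by ARB_shader_draw_parameters
 * (gl_BaseVertex/gl_BaseInstance and gl_DrawID); these are small buffers
 * that the hardware later sources as extra vertex data.
 */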
void
brw_prepare_shader_draw_parameters(struct brw_context *brw)
{
   const struct brw_vs_prog_data *vs_prog_data =
      brw_vs_prog_data(brw->vs.base.prog_data);

   /* For non-indirect draws, upload gl_BaseVertex. */
   if ((vs_prog_data->uses_basevertex || vs_prog_data->uses_baseinstance) &&
       brw->draw.draw_params_bo == NULL) {
      intel_upload_data(brw, &brw->draw.params, sizeof(brw->draw.params), 4,
                        &brw->draw.draw_params_bo,
                        &brw->draw.draw_params_offset);
   }

   if (vs_prog_data->uses_drawid) {
      intel_upload_data(brw, &brw->draw.gl_drawid, sizeof(brw->draw.gl_drawid), 4,
                        &brw->draw.draw_id_bo,
                        &brw->draw.draw_id_offset);
   }
}
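
/**
 * Make sure the index buffer lives in a GPU-visible buffer object, rebasing
 * misaligned user data into a temporary upload when necessary, and flag
 * BRW_NEW_INDEX_BUFFER when the underlying BO or index size changes.
 */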
static void
brw_upload_indices(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   const struct _mesa_index_buffer *index_buffer = brw->ib.ib;
   GLuint ib_size;
   struct brw_bo *old_bo = brw->ib.bo;
   struct gl_buffer_object *bufferobj;
   GLuint offset;
   GLuint ib_type_size;

   if (index_buffer == NULL)
      return;

   ib_type_size = index_buffer->index_size;
   ib_size = index_buffer->count ? ib_type_size * index_buffer->count :
                                   index_buffer->obj->Size;
   bufferobj = index_buffer->obj;

   /* Turn into a proper VBO:
    */
   if (!_mesa_is_bufferobj(bufferobj)) {
      /* Get new bufferobj, offset:
       */
      intel_upload_data(brw, index_buffer->ptr, ib_size, ib_type_size,
                        &brw->ib.bo, &offset);
      brw->ib.size = brw->ib.bo->size;
   } else {
      offset = (GLuint) (unsigned long) index_buffer->ptr;

      /* If the index buffer isn't aligned to its element size, we have to
       * rebase it into a temporary.
       */
      if ((ib_type_size - 1) & offset) {
         perf_debug("copying index buffer to a temporary to work around "
                    "misaligned offset %d\n", offset);

         GLubyte *map = ctx->Driver.MapBufferRange(ctx,
                                                   offset,
                                                   ib_size,
                                                   GL_MAP_READ_BIT,
                                                   bufferobj,
                                                   MAP_INTERNAL);

         intel_upload_data(brw, map, ib_size, ib_type_size,
                           &brw->ib.bo, &offset);
         brw->ib.size = brw->ib.bo->size;

         ctx->Driver.UnmapBuffer(ctx, bufferobj, MAP_INTERNAL);
      } else {
         struct brw_bo *bo =
            intel_bufferobj_buffer(brw, intel_buffer_object(bufferobj),
                                   offset, ib_size);
         if (bo != brw->ib.bo) {
            brw_bo_unreference(brw->ib.bo);
            brw->ib.bo = bo;
            brw->ib.size = bufferobj->Size;
            brw_bo_reference(bo);
         }
      }
   }

   /* Use 3DPRIMITIVE's start_vertex_offset to avoid re-uploading
    * the index buffer state when we're just moving the start index
    * of our drawing.
    */
   brw->ib.start_vertex_offset = offset / ib_type_size;

   if (brw->ib.bo != old_bo)
      brw->ctx.NewDriverState |= BRW_NEW_INDEX_BUFFER;

   if (index_buffer->index_size != brw->ib.index_size) {
      brw->ib.index_size = index_buffer->index_size;
      brw->ctx.NewDriverState |= BRW_NEW_INDEX_BUFFER;
   }
}

const struct brw_tracked_state brw_indices = {
   .dirty = {
      .mesa = 0,
      .brw = BRW_NEW_BLORP |
             BRW_NEW_INDEX_BUFFER,
   },
   .emit = brw_upload_indices,
};