/*
 * Copyright 2003 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "main/arrayobj.h"
#include "main/bufferobj.h"
#include "main/context.h"
#include "main/enums.h"
#include "main/macros.h"
#include "main/glformats.h"

#include "brw_defines.h"
#include "brw_context.h"
#include "brw_state.h"

#include "intel_batchbuffer.h"
#include "intel_buffer_objects.h"
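
/* Each table below maps a GL component count (1..4) to the matching ISL
 * vertex format; entry 0 is unused padding so that an attribute's Size can
 * index the table directly.
 */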
static const GLuint double_types_float[5] = {
   0,
   ISL_FORMAT_R64_FLOAT,
   ISL_FORMAT_R64G64_FLOAT,
   ISL_FORMAT_R64G64B64_FLOAT,
   ISL_FORMAT_R64G64B64A64_FLOAT
};

static const GLuint double_types_passthru[5] = {
   0,
   ISL_FORMAT_R64_PASSTHRU,
   ISL_FORMAT_R64G64_PASSTHRU,
   ISL_FORMAT_R64G64B64_PASSTHRU,
   ISL_FORMAT_R64G64B64A64_PASSTHRU
};

static const GLuint float_types[5] = {
   0,
   ISL_FORMAT_R32_FLOAT,
   ISL_FORMAT_R32G32_FLOAT,
   ISL_FORMAT_R32G32B32_FLOAT,
   ISL_FORMAT_R32G32B32A32_FLOAT
};

static const GLuint half_float_types[5] = {
   0,
   ISL_FORMAT_R16_FLOAT,
   ISL_FORMAT_R16G16_FLOAT,
   ISL_FORMAT_R16G16B16_FLOAT,
   ISL_FORMAT_R16G16B16A16_FLOAT
};

static const GLuint fixed_point_types[5] = {
   0,
   ISL_FORMAT_R32_SFIXED,
   ISL_FORMAT_R32G32_SFIXED,
   ISL_FORMAT_R32G32B32_SFIXED,
   ISL_FORMAT_R32G32B32A32_SFIXED
};

static const GLuint uint_types_direct[5] = {
   0,
   ISL_FORMAT_R32_UINT,
   ISL_FORMAT_R32G32_UINT,
   ISL_FORMAT_R32G32B32_UINT,
   ISL_FORMAT_R32G32B32A32_UINT
};

static const GLuint uint_types_norm[5] = {
   0,
   ISL_FORMAT_R32_UNORM,
   ISL_FORMAT_R32G32_UNORM,
   ISL_FORMAT_R32G32B32_UNORM,
   ISL_FORMAT_R32G32B32A32_UNORM
};

static const GLuint uint_types_scale[5] = {
   0,
   ISL_FORMAT_R32_USCALED,
   ISL_FORMAT_R32G32_USCALED,
   ISL_FORMAT_R32G32B32_USCALED,
   ISL_FORMAT_R32G32B32A32_USCALED
};

static const GLuint int_types_direct[5] = {
   0,
   ISL_FORMAT_R32_SINT,
   ISL_FORMAT_R32G32_SINT,
   ISL_FORMAT_R32G32B32_SINT,
   ISL_FORMAT_R32G32B32A32_SINT
};

static const GLuint int_types_norm[5] = {
   0,
   ISL_FORMAT_R32_SNORM,
   ISL_FORMAT_R32G32_SNORM,
   ISL_FORMAT_R32G32B32_SNORM,
   ISL_FORMAT_R32G32B32A32_SNORM
};

static const GLuint int_types_scale[5] = {
   0,
   ISL_FORMAT_R32_SSCALED,
   ISL_FORMAT_R32G32_SSCALED,
   ISL_FORMAT_R32G32B32_SSCALED,
   ISL_FORMAT_R32G32B32A32_SSCALED
};

static const GLuint ushort_types_direct[5] = {
   0,
   ISL_FORMAT_R16_UINT,
   ISL_FORMAT_R16G16_UINT,
   ISL_FORMAT_R16G16B16_UINT,
   ISL_FORMAT_R16G16B16A16_UINT
};

static const GLuint ushort_types_norm[5] = {
   0,
   ISL_FORMAT_R16_UNORM,
   ISL_FORMAT_R16G16_UNORM,
   ISL_FORMAT_R16G16B16_UNORM,
   ISL_FORMAT_R16G16B16A16_UNORM
};

static const GLuint ushort_types_scale[5] = {
   0,
   ISL_FORMAT_R16_USCALED,
   ISL_FORMAT_R16G16_USCALED,
   ISL_FORMAT_R16G16B16_USCALED,
   ISL_FORMAT_R16G16B16A16_USCALED
};

static const GLuint short_types_direct[5] = {
   0,
   ISL_FORMAT_R16_SINT,
   ISL_FORMAT_R16G16_SINT,
   ISL_FORMAT_R16G16B16_SINT,
   ISL_FORMAT_R16G16B16A16_SINT
};

static const GLuint short_types_norm[5] = {
   0,
   ISL_FORMAT_R16_SNORM,
   ISL_FORMAT_R16G16_SNORM,
   ISL_FORMAT_R16G16B16_SNORM,
   ISL_FORMAT_R16G16B16A16_SNORM
};

static const GLuint short_types_scale[5] = {
   0,
   ISL_FORMAT_R16_SSCALED,
   ISL_FORMAT_R16G16_SSCALED,
   ISL_FORMAT_R16G16B16_SSCALED,
   ISL_FORMAT_R16G16B16A16_SSCALED
};

static const GLuint ubyte_types_direct[5] = {
   0,
   ISL_FORMAT_R8_UINT,
   ISL_FORMAT_R8G8_UINT,
   ISL_FORMAT_R8G8B8_UINT,
   ISL_FORMAT_R8G8B8A8_UINT
};

static const GLuint ubyte_types_norm[5] = {
   0,
   ISL_FORMAT_R8_UNORM,
   ISL_FORMAT_R8G8_UNORM,
   ISL_FORMAT_R8G8B8_UNORM,
   ISL_FORMAT_R8G8B8A8_UNORM
};

static const GLuint ubyte_types_scale[5] = {
   0,
   ISL_FORMAT_R8_USCALED,
   ISL_FORMAT_R8G8_USCALED,
   ISL_FORMAT_R8G8B8_USCALED,
   ISL_FORMAT_R8G8B8A8_USCALED
};

static const GLuint byte_types_direct[5] = {
   0,
   ISL_FORMAT_R8_SINT,
   ISL_FORMAT_R8G8_SINT,
   ISL_FORMAT_R8G8B8_SINT,
   ISL_FORMAT_R8G8B8A8_SINT
};

static const GLuint byte_types_norm[5] = {
   0,
   ISL_FORMAT_R8_SNORM,
   ISL_FORMAT_R8G8_SNORM,
   ISL_FORMAT_R8G8B8_SNORM,
   ISL_FORMAT_R8G8B8A8_SNORM
};

static const GLuint byte_types_scale[5] = {
   0,
   ISL_FORMAT_R8_SSCALED,
   ISL_FORMAT_R8G8_SSCALED,
   ISL_FORMAT_R8G8B8_SSCALED,
   ISL_FORMAT_R8G8B8A8_SSCALED
};

static GLuint
double_types(int size, GLboolean doubles)
{
   /* From the BDW PRM, Volume 2d, page 588 (VERTEX_ELEMENT_STATE):
    * "When SourceElementFormat is set to one of the *64*_PASSTHRU formats,
    * 64-bit components are stored in the URB without any conversion."
    * Also included on BDW PRM, Volume 7, page 470, table "Source Element
    * Formats Supported in VF Unit"
    *
    * Previous PRMs don't include those references, so for gen7 we can't use
    * the PASSTHRU formats directly. But in any case, we prefer to return
    * passthru here, because that reflects what we want to achieve, even if
    * we need to work around it on gen < 8.
    */
   return (doubles
           ? double_types_passthru[size]
           : double_types_float[size]);
}
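
/* Worked example: an attribute specified with
 * glVertexAttribPointer(loc, 3, GL_FLOAT, GL_FALSE, stride, ptr) has
 * Size == 3 and Type == GL_FLOAT, so the lookup below resolves to
 * float_types[3] == ISL_FORMAT_R32G32B32_FLOAT.
 */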
/**
 * Given vertex array type/size/format/normalized info, return
 * the appropriate hardware surface type.
 * Format will be GL_RGBA or possibly GL_BGRA for GLubyte[4] color arrays.
 */
unsigned
brw_get_vertex_surface_type(struct brw_context *brw,
                            const struct gl_vertex_format *glformat)
{
   int size = glformat->Size;
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   const bool is_ivybridge_or_older =
      devinfo->gen <= 7 && !devinfo->is_baytrail && !devinfo->is_haswell;

   if (unlikely(INTEL_DEBUG & DEBUG_VERTS))
      fprintf(stderr, "type %s size %d normalized %d\n",
              _mesa_enum_to_string(glformat->Type),
              glformat->Size, glformat->Normalized);

   if (glformat->Integer) {
      assert(glformat->Format == GL_RGBA); /* sanity check */
      switch (glformat->Type) {
      case GL_INT: return int_types_direct[size];
      case GL_SHORT:
         if (is_ivybridge_or_older && size == 3)
            return short_types_direct[4];
         else
            return short_types_direct[size];
      case GL_BYTE:
         if (is_ivybridge_or_older && size == 3)
            return byte_types_direct[4];
         else
            return byte_types_direct[size];
      case GL_UNSIGNED_INT: return uint_types_direct[size];
      case GL_UNSIGNED_SHORT:
         if (is_ivybridge_or_older && size == 3)
            return ushort_types_direct[4];
         else
            return ushort_types_direct[size];
      case GL_UNSIGNED_BYTE:
         if (is_ivybridge_or_older && size == 3)
            return ubyte_types_direct[4];
         else
            return ubyte_types_direct[size];
      default: unreachable("not reached");
      }
   } else if (glformat->Type == GL_UNSIGNED_INT_10F_11F_11F_REV) {
      return ISL_FORMAT_R11G11B10_FLOAT;
   } else if (glformat->Normalized) {
      switch (glformat->Type) {
      case GL_DOUBLE: return double_types(size, glformat->Doubles);
      case GL_FLOAT: return float_types[size];
      case GL_HALF_FLOAT:
      case GL_HALF_FLOAT_OES:
         if (devinfo->gen < 6 && size == 3)
            return half_float_types[4];
         else
            return half_float_types[size];
      case GL_INT: return int_types_norm[size];
      case GL_SHORT: return short_types_norm[size];
      case GL_BYTE: return byte_types_norm[size];
      case GL_UNSIGNED_INT: return uint_types_norm[size];
      case GL_UNSIGNED_SHORT: return ushort_types_norm[size];
      case GL_UNSIGNED_BYTE:
         if (glformat->Format == GL_BGRA) {
            /* See GL_EXT_vertex_array_bgra */
            return ISL_FORMAT_B8G8R8A8_UNORM;
         }
         else {
            return ubyte_types_norm[size];
         }
      case GL_FIXED:
         if (devinfo->gen >= 8 || devinfo->is_haswell)
            return fixed_point_types[size];

         /* This produces GL_FIXED inputs as values between INT32_MIN and
          * INT32_MAX, which will be scaled down by 1/65536 by the VS.
          */
         return int_types_scale[size];
      /* See GL_ARB_vertex_type_2_10_10_10_rev.
       * W/A: Pre-Haswell, the hardware doesn't really support the formats
       * we'd like to use here, so upload everything as UINT and fix it in
       * the shader.
       */
      case GL_INT_2_10_10_10_REV:
         assert(glformat->Format == GL_RGBA || glformat->Format == GL_BGRA);
         if (devinfo->gen >= 8 || devinfo->is_haswell) {
            return glformat->Format == GL_BGRA
               ? ISL_FORMAT_B10G10R10A2_SNORM
               : ISL_FORMAT_R10G10B10A2_SNORM;
         }
         return ISL_FORMAT_R10G10B10A2_UINT;
      case GL_UNSIGNED_INT_2_10_10_10_REV:
         assert(glformat->Format == GL_RGBA || glformat->Format == GL_BGRA);
         if (devinfo->gen >= 8 || devinfo->is_haswell) {
            return glformat->Format == GL_BGRA
               ? ISL_FORMAT_B10G10R10A2_UNORM
               : ISL_FORMAT_R10G10B10A2_UNORM;
         }
         return ISL_FORMAT_R10G10B10A2_UINT;
      default: unreachable("not reached");
      }
   } else {
      /* See GL_ARB_vertex_type_2_10_10_10_rev.
       * W/A: the hardware doesn't really support the formats we'd like to
       * use here, so upload everything as UINT and fix it in the shader.
       */
      if (glformat->Type == GL_INT_2_10_10_10_REV) {
         assert(glformat->Format == GL_RGBA || glformat->Format == GL_BGRA);
         if (devinfo->gen >= 8 || devinfo->is_haswell) {
            return glformat->Format == GL_BGRA
               ? ISL_FORMAT_B10G10R10A2_SSCALED
               : ISL_FORMAT_R10G10B10A2_SSCALED;
         }
         return ISL_FORMAT_R10G10B10A2_UINT;
      } else if (glformat->Type == GL_UNSIGNED_INT_2_10_10_10_REV) {
         assert(glformat->Format == GL_RGBA || glformat->Format == GL_BGRA);
         if (devinfo->gen >= 8 || devinfo->is_haswell) {
            return glformat->Format == GL_BGRA
               ? ISL_FORMAT_B10G10R10A2_USCALED
               : ISL_FORMAT_R10G10B10A2_USCALED;
         }
         return ISL_FORMAT_R10G10B10A2_UINT;
      }

      assert(glformat->Format == GL_RGBA); /* sanity check */
      switch (glformat->Type) {
      case GL_DOUBLE: return double_types(size, glformat->Doubles);
      case GL_FLOAT: return float_types[size];
      case GL_HALF_FLOAT:
      case GL_HALF_FLOAT_OES:
         if (devinfo->gen < 6 && size == 3)
            return half_float_types[4];
         else
            return half_float_types[size];
      case GL_INT: return int_types_scale[size];
      case GL_SHORT: return short_types_scale[size];
      case GL_BYTE: return byte_types_scale[size];
      case GL_UNSIGNED_INT: return uint_types_scale[size];
      case GL_UNSIGNED_SHORT: return ushort_types_scale[size];
      case GL_UNSIGNED_BYTE: return ubyte_types_scale[size];
      case GL_FIXED:
         if (devinfo->gen >= 8 || devinfo->is_haswell)
            return fixed_point_types[size];

         /* This produces GL_FIXED inputs as values between INT32_MIN and
          * INT32_MAX, which will be scaled down by 1/65536 by the VS.
          */
         return int_types_scale[size];
      default: unreachable("not reached");
      }
   }
}

static void
copy_array_to_vbo_array(struct brw_context *brw,
                        struct brw_vertex_element *element,
                        int min, int max,
                        struct brw_vertex_buffer *buffer,
                        GLuint dst_stride)
{
   const struct gl_vertex_buffer_binding *glbinding = element->glbinding;
   const struct gl_array_attributes *glattrib = element->glattrib;
   const struct gl_vertex_format *glformat = &glattrib->Format;
   const int src_stride = glbinding->Stride;

   /* If the source stride is zero, we just want to upload the current
    * attribute once and set the buffer's stride to 0.  There's no need
    * to replicate it out.
    */
   if (src_stride == 0) {
      brw_upload_data(&brw->upload, glattrib->Ptr, glformat->_ElementSize,
                      glformat->_ElementSize, &buffer->bo, &buffer->offset);

      buffer->stride = 0;
      buffer->size = glformat->_ElementSize;
      return;
   }

   const unsigned char *src = glattrib->Ptr + min * src_stride;
   int count = max - min + 1;
   GLuint size = count * dst_stride;
   uint8_t *dst = brw_upload_space(&brw->upload, size, dst_stride,
                                   &buffer->bo, &buffer->offset);

   /* The GL 4.5 spec says:
    *      "If any enabled array’s buffer binding is zero when DrawArrays or
    *      one of the other drawing commands defined in section 10.4 is called,
    *      the result is undefined."
    *
    * In that case src is NULL, so leave dst filled with undefined values.
    */
   if (src != NULL) {
      if (dst_stride == src_stride) {
         memcpy(dst, src, size);
      } else {
         while (count--) {
            memcpy(dst, src, dst_stride);
            src += src_stride;
            dst += dst_stride;
         }
      }
   }
   buffer->stride = dst_stride;
   buffer->size = size;
}
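
/* Gather the enabled vertex arrays for the current draw: VBO-backed arrays
 * are referenced in place, while user-space (client memory) arrays are
 * queued and copied into upload buffers, interleaved when possible.
 */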
static void
brw_prepare_vertices(struct brw_context *brw)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   struct gl_context *ctx = &brw->ctx;
   /* BRW_NEW_VERTEX_PROGRAM */
   const struct gl_program *vp = brw->programs[MESA_SHADER_VERTEX];
   /* BRW_NEW_VS_PROG_DATA */
   const struct brw_vs_prog_data *vs_prog_data =
      brw_vs_prog_data(brw->vs.base.prog_data);
   GLbitfield64 vs_inputs =
      nir_get_single_slot_attribs_mask(vs_prog_data->inputs_read,
                                       vp->DualSlotInputs);
   const unsigned char *ptr = NULL;
   GLuint interleaved = 0;
   unsigned int min_index = brw->vb.min_index + brw->basevertex;
   unsigned int max_index = brw->vb.max_index + brw->basevertex;
   unsigned i;
   int delta, j;

   struct brw_vertex_element *upload[VERT_ATTRIB_MAX];
   GLuint nr_uploads = 0;

   /* _NEW_POLYGON
    *
    * On gen6+, edge flags don't end up in the VUE (either in or out of the
    * VS).  Instead, they're uploaded as the last vertex element, and the
    * data is passed sideband through the fixed function units.  So, we need
    * to prepare the vertex buffer for it, but it's not present in
    * inputs_read.
    */
   if (devinfo->gen >= 6 && (ctx->Polygon.FrontMode != GL_FILL ||
                             ctx->Polygon.BackMode != GL_FILL)) {
      vs_inputs |= VERT_BIT_EDGEFLAG;
   }

   if (unlikely(INTEL_DEBUG & DEBUG_VERTS))
      fprintf(stderr, "%s %d..%d\n", __func__, min_index, max_index);

   /* Accumulate the list of enabled arrays. */
   brw->vb.nr_enabled = 0;
   while (vs_inputs) {
      const unsigned index = ffsll(vs_inputs) - 1;

      struct brw_vertex_element *input = &brw->vb.inputs[index];
      input->is_dual_slot = (vp->DualSlotInputs & BITFIELD64_BIT(index)) != 0;
      vs_inputs &= ~BITFIELD64_BIT(index);
      brw->vb.enabled[brw->vb.nr_enabled++] = input;
   }

   if (brw->vb.nr_enabled == 0)
      return;

   if (brw->vb.nr_buffers)
      return;

   /* The range of data in a given buffer represented as [min, max) */
   struct intel_buffer_object *enabled_buffer[VERT_ATTRIB_MAX];
   uint32_t buffer_range_start[VERT_ATTRIB_MAX];
   uint32_t buffer_range_end[VERT_ATTRIB_MAX];

   for (i = j = 0; i < brw->vb.nr_enabled; i++) {
      struct brw_vertex_element *input = brw->vb.enabled[i];
      const struct gl_vertex_buffer_binding *glbinding = input->glbinding;
      const struct gl_array_attributes *glattrib = input->glattrib;

      if (_mesa_is_bufferobj(glbinding->BufferObj)) {
         struct intel_buffer_object *intel_buffer =
            intel_buffer_object(glbinding->BufferObj);

         const uint32_t offset = _mesa_draw_binding_offset(glbinding) +
            _mesa_draw_attributes_relative_offset(glattrib);

         /* Start with the worst case */
         uint32_t start = 0;
         uint32_t range = intel_buffer->Base.Size;
         if (glbinding->InstanceDivisor) {
            if (brw->num_instances) {
               start = offset + glbinding->Stride * brw->baseinstance;
               range = (glbinding->Stride * ((brw->num_instances - 1) /
                                             glbinding->InstanceDivisor) +
                        glattrib->Format._ElementSize);
            }
         } else {
            if (brw->vb.index_bounds_valid) {
               start = offset + min_index * glbinding->Stride;
               range = (glbinding->Stride * (max_index - min_index) +
                        glattrib->Format._ElementSize);
            }
         }

         /* If we have a VB set to be uploaded for this buffer object
          * already, reuse that VB state so that we emit fewer
          * relocations.
          */
         unsigned k;
         for (k = 0; k < i; k++) {
            struct brw_vertex_element *other = brw->vb.enabled[k];
            const struct gl_vertex_buffer_binding *obind = other->glbinding;
            const struct gl_array_attributes *oattrib = other->glattrib;
            const uint32_t ooffset = _mesa_draw_binding_offset(obind) +
               _mesa_draw_attributes_relative_offset(oattrib);
            if (glbinding->BufferObj == obind->BufferObj &&
                glbinding->Stride == obind->Stride &&
                glbinding->InstanceDivisor == obind->InstanceDivisor &&
                (offset - ooffset) < glbinding->Stride)
            {
               input->buffer = brw->vb.enabled[k]->buffer;
               input->offset = offset - ooffset;

               buffer_range_start[input->buffer] =
                  MIN2(buffer_range_start[input->buffer], start);
               buffer_range_end[input->buffer] =
                  MAX2(buffer_range_end[input->buffer], start + range);
               break;
            }
         }
         if (k == i) {
            struct brw_vertex_buffer *buffer = &brw->vb.buffers[j];

            /* Named buffer object: Just reference its contents directly. */
            buffer->offset = offset;
            buffer->stride = glbinding->Stride;
            buffer->step_rate = glbinding->InstanceDivisor;
            buffer->size = glbinding->BufferObj->Size - offset;

            enabled_buffer[j] = intel_buffer;
            buffer_range_start[j] = start;
            buffer_range_end[j] = start + range;

            input->buffer = j++;
            input->offset = offset;
         }
      } else {
         /* Queue the buffer object up to be uploaded in the next pass,
          * when we've decided if we're doing interleaved or not.
          */
         if (nr_uploads == 0) {
            interleaved = glbinding->Stride;
            ptr = glattrib->Ptr;
         }
         else if (interleaved != glbinding->Stride ||
                  glbinding->InstanceDivisor != 0 ||
                  glattrib->Ptr < ptr ||
                  (uintptr_t)(glattrib->Ptr - ptr) +
                  glattrib->Format._ElementSize > interleaved)
         {
            /* If our stride is different from the first attribute's stride,
             * or if we are using an instance divisor or if the first
             * attribute's stride didn't cover our element, disable the
             * interleaved upload optimization.  The second case can most
             * commonly occur in cases where there is a single vertex and,
             * for example, the data is stored on the application's stack.
             *
             * NOTE: This will also disable the optimization in cases where
             * the data is in a different order than the array indices.
             * Something like:
             *
             *       float data[...];
             *       glVertexAttribPointer(0, 4, GL_FLOAT, 32, &data[4]);
             *       glVertexAttribPointer(1, 4, GL_FLOAT, 32, &data[0]);
             */
            interleaved = 0;
         }

         upload[nr_uploads++] = input;
      }
   }

   /* Now that we've set up all of the buffers, we walk through and reference
    * each of them.  We do this late so that we get the right size in each
    * buffer and don't reference too little data.
    */
   for (i = 0; i < j; i++) {
      struct brw_vertex_buffer *buffer = &brw->vb.buffers[i];
      if (buffer->bo)
         continue;

      const uint32_t start = buffer_range_start[i];
      const uint32_t range = buffer_range_end[i] - buffer_range_start[i];

      buffer->bo = intel_bufferobj_buffer(brw, enabled_buffer[i], start,
                                          range, false);
      brw_bo_reference(buffer->bo);
   }

   /* If we need to upload all the arrays, then we can trim those arrays to
    * only the used elements [min_index, max_index] so long as we adjust all
    * the values used in the 3DPRIMITIVE i.e. by setting the vertex bias.
    */
   brw->vb.start_vertex_bias = 0;
   delta = min_index;
   if (nr_uploads == brw->vb.nr_enabled) {
      brw->vb.start_vertex_bias = -delta;
      delta = 0;
   }

   /* Handle any arrays to be uploaded. */
   if (nr_uploads > 1) {
      if (interleaved) {
         struct brw_vertex_buffer *buffer = &brw->vb.buffers[j];
         /* All uploads are interleaved, so upload the arrays together as
          * interleaved.  First, upload the contents and set up upload[0].
          */
         copy_array_to_vbo_array(brw, upload[0], min_index, max_index,
                                 buffer, interleaved);
         buffer->offset -= delta * interleaved;
         buffer->size += delta * interleaved;
         buffer->step_rate = 0;

         for (i = 0; i < nr_uploads; i++) {
            const struct gl_array_attributes *glattrib = upload[i]->glattrib;
            /* Then, just point upload[i] at upload[0]'s buffer. */
            upload[i]->offset = ((const unsigned char *)glattrib->Ptr - ptr);
            upload[i]->buffer = j;
         }
         j++;

         nr_uploads = 0;
      }
   }

   /* Upload non-interleaved arrays */
   for (i = 0; i < nr_uploads; i++) {
      struct brw_vertex_buffer *buffer = &brw->vb.buffers[j];
      const struct gl_vertex_buffer_binding *glbinding = upload[i]->glbinding;
      const struct gl_array_attributes *glattrib = upload[i]->glattrib;
      if (glbinding->InstanceDivisor == 0) {
         copy_array_to_vbo_array(brw, upload[i], min_index, max_index,
                                 buffer, glattrib->Format._ElementSize);
      } else {
         /* This is an instanced attribute, since its InstanceDivisor
          * is not zero.  Therefore, its data will be stepped after the
          * instanced draw has been run InstanceDivisor times.
          */
         uint32_t instanced_attr_max_index =
            (brw->num_instances - 1) / glbinding->InstanceDivisor;
         copy_array_to_vbo_array(brw, upload[i], 0, instanced_attr_max_index,
                                 buffer, glattrib->Format._ElementSize);
      }
      buffer->offset -= delta * buffer->stride;
      buffer->size += delta * buffer->stride;
      buffer->step_rate = glbinding->InstanceDivisor;
      upload[i]->buffer = j++;
      upload[i]->offset = 0;
   }

   brw->vb.nr_buffers = j;
}
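
/* Upload the draw-parameter buffers consumed by the VS: the first-vertex /
 * base-instance values, and the derived draw-id / is-indexed-draw values,
 * when the compiled shader actually uses them.
 */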
void
brw_prepare_shader_draw_parameters(struct brw_context *brw)
{
   const struct brw_vs_prog_data *vs_prog_data =
      brw_vs_prog_data(brw->vs.base.prog_data);

   /* For non-indirect draws, upload the shader draw parameters */
   if ((vs_prog_data->uses_firstvertex || vs_prog_data->uses_baseinstance) &&
       brw->draw.draw_params_bo == NULL) {
      brw_upload_data(&brw->upload,
                      &brw->draw.params, sizeof(brw->draw.params), 4,
                      &brw->draw.draw_params_bo,
                      &brw->draw.draw_params_offset);
   }

   if (vs_prog_data->uses_drawid || vs_prog_data->uses_is_indexed_draw) {
      brw_upload_data(&brw->upload,
                      &brw->draw.derived_params,
                      sizeof(brw->draw.derived_params), 4,
                      &brw->draw.derived_draw_params_bo,
                      &brw->draw.derived_draw_params_offset);
   }
}
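
/* Ensure brw->ib.bo points at the index data for the current draw, either
 * by uploading a user-space index array or by referencing the bound element
 * array buffer, and flag BRW_NEW_INDEX_BUFFER whenever that state changes.
 */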
static void
brw_upload_indices(struct brw_context *brw)
{
   const struct _mesa_index_buffer *index_buffer = brw->ib.ib;
   GLuint ib_size;
   struct brw_bo *old_bo = brw->ib.bo;
   struct gl_buffer_object *bufferobj;
   GLuint offset;
   GLuint ib_type_size;

   if (index_buffer == NULL)
      return;

   ib_type_size = index_buffer->index_size;
   ib_size = index_buffer->count ? ib_type_size * index_buffer->count :
                                   index_buffer->obj->Size;
   bufferobj = index_buffer->obj;

   /* Turn into a proper VBO:
    */
   if (!_mesa_is_bufferobj(bufferobj)) {
      /* Get new bufferobj, offset:
       */
      brw_upload_data(&brw->upload, index_buffer->ptr, ib_size, ib_type_size,
                      &brw->ib.bo, &offset);
      brw->ib.size = brw->ib.bo->size;
   } else {
      offset = (GLuint) (unsigned long) index_buffer->ptr;

      struct brw_bo *bo =
         intel_bufferobj_buffer(brw, intel_buffer_object(bufferobj),
                                offset, ib_size, false);
      if (bo != brw->ib.bo) {
         brw_bo_unreference(brw->ib.bo);
         brw->ib.bo = bo;
         brw->ib.size = bufferobj->Size;
         brw_bo_reference(bo);
      }
   }

   /* Use 3DPRIMITIVE's start_vertex_offset to avoid re-uploading
    * the index buffer state when we're just moving the start index
    * of our drawing.
    */
   brw->ib.start_vertex_offset = offset / ib_type_size;

   if (brw->ib.bo != old_bo)
      brw->ctx.NewDriverState |= BRW_NEW_INDEX_BUFFER;

   if (index_buffer->index_size != brw->ib.index_size) {
      brw->ib.index_size = index_buffer->index_size;
      brw->ctx.NewDriverState |= BRW_NEW_INDEX_BUFFER;
   }

   /* We need to re-emit index buffer state each time
    * the cut index flag changes.
    */
   if (brw->prim_restart.enable_cut_index != brw->ib.enable_cut_index) {
      brw->ib.enable_cut_index = brw->prim_restart.enable_cut_index;
      brw->ctx.NewDriverState |= BRW_NEW_INDEX_BUFFER;
   }
}
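
/* State atom: re-run brw_upload_indices() whenever the index buffer changes
 * or BLORP has clobbered driver state.
 */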
const struct brw_tracked_state brw_indices = {
   .dirty = {
      .mesa = 0,
      .brw = BRW_NEW_BLORP |
             BRW_NEW_INDICES,
   },
   .emit = brw_upload_indices,
};