/*
 * Copyright 2003 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "main/bufferobj.h"
#include "main/context.h"
#include "main/enums.h"
#include "main/macros.h"
#include "main/glformats.h"

#include "brw_draw.h"
#include "brw_defines.h"
#include "brw_context.h"
#include "brw_state.h"

#include "intel_batchbuffer.h"
#include "intel_buffer_objects.h"
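
/* Lookup tables mapping a GL vertex attribute type to a hardware surface
 * format, indexed by the attribute's component count (1..4).  Entry 0 of
 * each table is unused padding.
 */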
static const GLuint double_types_float[5] = {
   0,
   BRW_SURFACEFORMAT_R64_FLOAT,
   BRW_SURFACEFORMAT_R64G64_FLOAT,
   BRW_SURFACEFORMAT_R64G64B64_FLOAT,
   BRW_SURFACEFORMAT_R64G64B64A64_FLOAT
};

static const GLuint double_types_passthru[5] = {
   0,
   BRW_SURFACEFORMAT_R64_PASSTHRU,
   BRW_SURFACEFORMAT_R64G64_PASSTHRU,
   BRW_SURFACEFORMAT_R64G64B64_PASSTHRU,
   BRW_SURFACEFORMAT_R64G64B64A64_PASSTHRU
};

static const GLuint float_types[5] = {
   0,
   BRW_SURFACEFORMAT_R32_FLOAT,
   BRW_SURFACEFORMAT_R32G32_FLOAT,
   BRW_SURFACEFORMAT_R32G32B32_FLOAT,
   BRW_SURFACEFORMAT_R32G32B32A32_FLOAT
};

static const GLuint half_float_types[5] = {
   0,
   BRW_SURFACEFORMAT_R16_FLOAT,
   BRW_SURFACEFORMAT_R16G16_FLOAT,
   BRW_SURFACEFORMAT_R16G16B16_FLOAT,
   BRW_SURFACEFORMAT_R16G16B16A16_FLOAT
};

static const GLuint fixed_point_types[5] = {
   0,
   BRW_SURFACEFORMAT_R32_SFIXED,
   BRW_SURFACEFORMAT_R32G32_SFIXED,
   BRW_SURFACEFORMAT_R32G32B32_SFIXED,
   BRW_SURFACEFORMAT_R32G32B32A32_SFIXED
};

static const GLuint uint_types_direct[5] = {
   0,
   BRW_SURFACEFORMAT_R32_UINT,
   BRW_SURFACEFORMAT_R32G32_UINT,
   BRW_SURFACEFORMAT_R32G32B32_UINT,
   BRW_SURFACEFORMAT_R32G32B32A32_UINT
};

static const GLuint uint_types_norm[5] = {
   0,
   BRW_SURFACEFORMAT_R32_UNORM,
   BRW_SURFACEFORMAT_R32G32_UNORM,
   BRW_SURFACEFORMAT_R32G32B32_UNORM,
   BRW_SURFACEFORMAT_R32G32B32A32_UNORM
};

static const GLuint uint_types_scale[5] = {
   0,
   BRW_SURFACEFORMAT_R32_USCALED,
   BRW_SURFACEFORMAT_R32G32_USCALED,
   BRW_SURFACEFORMAT_R32G32B32_USCALED,
   BRW_SURFACEFORMAT_R32G32B32A32_USCALED
};

static const GLuint int_types_direct[5] = {
   0,
   BRW_SURFACEFORMAT_R32_SINT,
   BRW_SURFACEFORMAT_R32G32_SINT,
   BRW_SURFACEFORMAT_R32G32B32_SINT,
   BRW_SURFACEFORMAT_R32G32B32A32_SINT
};

static const GLuint int_types_norm[5] = {
   0,
   BRW_SURFACEFORMAT_R32_SNORM,
   BRW_SURFACEFORMAT_R32G32_SNORM,
   BRW_SURFACEFORMAT_R32G32B32_SNORM,
   BRW_SURFACEFORMAT_R32G32B32A32_SNORM
};

static const GLuint int_types_scale[5] = {
   0,
   BRW_SURFACEFORMAT_R32_SSCALED,
   BRW_SURFACEFORMAT_R32G32_SSCALED,
   BRW_SURFACEFORMAT_R32G32B32_SSCALED,
   BRW_SURFACEFORMAT_R32G32B32A32_SSCALED
};

static const GLuint ushort_types_direct[5] = {
   0,
   BRW_SURFACEFORMAT_R16_UINT,
   BRW_SURFACEFORMAT_R16G16_UINT,
   BRW_SURFACEFORMAT_R16G16B16_UINT,
   BRW_SURFACEFORMAT_R16G16B16A16_UINT
};

static const GLuint ushort_types_norm[5] = {
   0,
   BRW_SURFACEFORMAT_R16_UNORM,
   BRW_SURFACEFORMAT_R16G16_UNORM,
   BRW_SURFACEFORMAT_R16G16B16_UNORM,
   BRW_SURFACEFORMAT_R16G16B16A16_UNORM
};

static const GLuint ushort_types_scale[5] = {
   0,
   BRW_SURFACEFORMAT_R16_USCALED,
   BRW_SURFACEFORMAT_R16G16_USCALED,
   BRW_SURFACEFORMAT_R16G16B16_USCALED,
   BRW_SURFACEFORMAT_R16G16B16A16_USCALED
};

static const GLuint short_types_direct[5] = {
   0,
   BRW_SURFACEFORMAT_R16_SINT,
   BRW_SURFACEFORMAT_R16G16_SINT,
   BRW_SURFACEFORMAT_R16G16B16_SINT,
   BRW_SURFACEFORMAT_R16G16B16A16_SINT
};

static const GLuint short_types_norm[5] = {
   0,
   BRW_SURFACEFORMAT_R16_SNORM,
   BRW_SURFACEFORMAT_R16G16_SNORM,
   BRW_SURFACEFORMAT_R16G16B16_SNORM,
   BRW_SURFACEFORMAT_R16G16B16A16_SNORM
};

static const GLuint short_types_scale[5] = {
   0,
   BRW_SURFACEFORMAT_R16_SSCALED,
   BRW_SURFACEFORMAT_R16G16_SSCALED,
   BRW_SURFACEFORMAT_R16G16B16_SSCALED,
   BRW_SURFACEFORMAT_R16G16B16A16_SSCALED
};

static const GLuint ubyte_types_direct[5] = {
   0,
   BRW_SURFACEFORMAT_R8_UINT,
   BRW_SURFACEFORMAT_R8G8_UINT,
   BRW_SURFACEFORMAT_R8G8B8_UINT,
   BRW_SURFACEFORMAT_R8G8B8A8_UINT
};

static const GLuint ubyte_types_norm[5] = {
   0,
   BRW_SURFACEFORMAT_R8_UNORM,
   BRW_SURFACEFORMAT_R8G8_UNORM,
   BRW_SURFACEFORMAT_R8G8B8_UNORM,
   BRW_SURFACEFORMAT_R8G8B8A8_UNORM
};

static const GLuint ubyte_types_scale[5] = {
   0,
   BRW_SURFACEFORMAT_R8_USCALED,
   BRW_SURFACEFORMAT_R8G8_USCALED,
   BRW_SURFACEFORMAT_R8G8B8_USCALED,
   BRW_SURFACEFORMAT_R8G8B8A8_USCALED
};

static const GLuint byte_types_direct[5] = {
   0,
   BRW_SURFACEFORMAT_R8_SINT,
   BRW_SURFACEFORMAT_R8G8_SINT,
   BRW_SURFACEFORMAT_R8G8B8_SINT,
   BRW_SURFACEFORMAT_R8G8B8A8_SINT
};

static const GLuint byte_types_norm[5] = {
   0,
   BRW_SURFACEFORMAT_R8_SNORM,
   BRW_SURFACEFORMAT_R8G8_SNORM,
   BRW_SURFACEFORMAT_R8G8B8_SNORM,
   BRW_SURFACEFORMAT_R8G8B8A8_SNORM
};

static const GLuint byte_types_scale[5] = {
   0,
   BRW_SURFACEFORMAT_R8_SSCALED,
   BRW_SURFACEFORMAT_R8G8_SSCALED,
   BRW_SURFACEFORMAT_R8G8B8_SSCALED,
   BRW_SURFACEFORMAT_R8G8B8A8_SSCALED
};
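
/* Select the surface format for a double-precision vertex attribute: the
 * *64*_PASSTHRU formats store the 64-bit data unconverted where the hardware
 * supports it, otherwise fall back to the *64*_FLOAT formats (see the PRM
 * quote below).
 */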
static GLuint
double_types(struct brw_context *brw,
             int size,
             GLboolean doubles)
{
   /* From the BDW PRM, Volume 2d, page 588 (VERTEX_ELEMENT_STATE):
    * "When SourceElementFormat is set to one of the *64*_PASSTHRU formats,
    * 64-bit components are stored in the URB without any conversion."
    * Also included on BDW PRM, Volume 7, page 470, table "Source Element
    * Formats Supported in VF Unit"
    * Previous PRMs don't include those references.
    */
   return (brw->gen >= 8 && doubles
           ? double_types_passthru[size]
           : double_types_float[size]);
}

/**
 * Given vertex array type/size/format/normalized info, return
 * the appropriate hardware surface type.
 * Format will be GL_RGBA or possibly GL_BGRA for GLubyte[4] color arrays.
 */
unsigned
brw_get_vertex_surface_type(struct brw_context *brw,
                            const struct gl_client_array *glarray)
{
   int size = glarray->Size;
   const bool is_ivybridge_or_older =
      brw->gen <= 7 && !brw->is_baytrail && !brw->is_haswell;

   if (unlikely(INTEL_DEBUG & DEBUG_VERTS))
      fprintf(stderr, "type %s size %d normalized %d\n",
              _mesa_enum_to_string(glarray->Type),
              glarray->Size, glarray->Normalized);

   if (glarray->Integer) {
      assert(glarray->Format == GL_RGBA); /* sanity check */
      switch (glarray->Type) {
      case GL_INT: return int_types_direct[size];
      case GL_SHORT:
         if (is_ivybridge_or_older && size == 3)
            return short_types_direct[4];
         else
            return short_types_direct[size];
      case GL_BYTE:
         if (is_ivybridge_or_older && size == 3)
            return byte_types_direct[4];
         else
            return byte_types_direct[size];
      case GL_UNSIGNED_INT: return uint_types_direct[size];
      case GL_UNSIGNED_SHORT:
         if (is_ivybridge_or_older && size == 3)
            return ushort_types_direct[4];
         else
            return ushort_types_direct[size];
      case GL_UNSIGNED_BYTE:
         if (is_ivybridge_or_older && size == 3)
            return ubyte_types_direct[4];
         else
            return ubyte_types_direct[size];
      default: unreachable("not reached");
      }
   } else if (glarray->Type == GL_UNSIGNED_INT_10F_11F_11F_REV) {
      return BRW_SURFACEFORMAT_R11G11B10_FLOAT;
   } else if (glarray->Normalized) {
      switch (glarray->Type) {
      case GL_DOUBLE: return double_types(brw, size, glarray->Doubles);
      case GL_FLOAT: return float_types[size];
      case GL_HALF_FLOAT:
         if (brw->gen < 6 && size == 3)
            return half_float_types[4];
         else
            return half_float_types[size];
      case GL_INT: return int_types_norm[size];
      case GL_SHORT: return short_types_norm[size];
      case GL_BYTE: return byte_types_norm[size];
      case GL_UNSIGNED_INT: return uint_types_norm[size];
      case GL_UNSIGNED_SHORT: return ushort_types_norm[size];
      case GL_UNSIGNED_BYTE:
         if (glarray->Format == GL_BGRA) {
            /* See GL_EXT_vertex_array_bgra */
            return BRW_SURFACEFORMAT_B8G8R8A8_UNORM;
         } else {
            return ubyte_types_norm[size];
         }
      case GL_FIXED:
         if (brw->gen >= 8 || brw->is_haswell)
            return fixed_point_types[size];

         /* This produces GL_FIXED inputs as values between INT32_MIN and
          * INT32_MAX, which will be scaled down by 1/65536 by the VS.
          */
         return int_types_scale[size];
      /* See GL_ARB_vertex_type_2_10_10_10_rev.
       * W/A: Pre-Haswell, the hardware doesn't really support the formats we'd
       * like to use here, so upload everything as UINT and fix it in the
       * shader.
       */
      case GL_INT_2_10_10_10_REV:
         assert(size == 4);
         if (brw->gen >= 8 || brw->is_haswell) {
            return glarray->Format == GL_BGRA
               ? BRW_SURFACEFORMAT_B10G10R10A2_SNORM
               : BRW_SURFACEFORMAT_R10G10B10A2_SNORM;
         }
         return BRW_SURFACEFORMAT_R10G10B10A2_UINT;
      case GL_UNSIGNED_INT_2_10_10_10_REV:
         assert(size == 4);
         if (brw->gen >= 8 || brw->is_haswell) {
            return glarray->Format == GL_BGRA
               ? BRW_SURFACEFORMAT_B10G10R10A2_UNORM
               : BRW_SURFACEFORMAT_R10G10B10A2_UNORM;
         }
         return BRW_SURFACEFORMAT_R10G10B10A2_UINT;
      default: unreachable("not reached");
      }
   } else {
      /* See GL_ARB_vertex_type_2_10_10_10_rev.
       * W/A: the hardware doesn't really support the formats we'd like to
       * use here, so upload everything as UINT and fix it in the shader.
       */
      if (glarray->Type == GL_INT_2_10_10_10_REV) {
         assert(size == 4);
         if (brw->gen >= 8 || brw->is_haswell) {
            return glarray->Format == GL_BGRA
               ? BRW_SURFACEFORMAT_B10G10R10A2_SSCALED
               : BRW_SURFACEFORMAT_R10G10B10A2_SSCALED;
         }
         return BRW_SURFACEFORMAT_R10G10B10A2_UINT;
      } else if (glarray->Type == GL_UNSIGNED_INT_2_10_10_10_REV) {
         assert(size == 4);
         if (brw->gen >= 8 || brw->is_haswell) {
            return glarray->Format == GL_BGRA
               ? BRW_SURFACEFORMAT_B10G10R10A2_USCALED
               : BRW_SURFACEFORMAT_R10G10B10A2_USCALED;
         }
         return BRW_SURFACEFORMAT_R10G10B10A2_UINT;
      }
      assert(glarray->Format == GL_RGBA); /* sanity check */
      switch (glarray->Type) {
      case GL_DOUBLE: return double_types(brw, size, glarray->Doubles);
      case GL_FLOAT: return float_types[size];
      case GL_HALF_FLOAT:
         if (brw->gen < 6 && size == 3)
            return half_float_types[4];
         else
            return half_float_types[size];
      case GL_INT: return int_types_scale[size];
      case GL_SHORT: return short_types_scale[size];
      case GL_BYTE: return byte_types_scale[size];
      case GL_UNSIGNED_INT: return uint_types_scale[size];
      case GL_UNSIGNED_SHORT: return ushort_types_scale[size];
      case GL_UNSIGNED_BYTE: return ubyte_types_scale[size];
      case GL_FIXED:
         if (brw->gen >= 8 || brw->is_haswell)
            return fixed_point_types[size];

         /* This produces GL_FIXED inputs as values between INT32_MIN and
          * INT32_MAX, which will be scaled down by 1/65536 by the VS.
          */
         return int_types_scale[size];
      default: unreachable("not reached");
      }
   }
}
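
/* Upload the [min, max] range of a user (non-VBO) vertex array into a
 * freshly allocated chunk of upload space, repacking the elements to
 * dst_stride on the way.
 */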
static void
copy_array_to_vbo_array(struct brw_context *brw,
                        struct brw_vertex_element *element,
                        int min, int max,
                        struct brw_vertex_buffer *buffer,
                        GLuint dst_stride)
{
   const int src_stride = element->glarray->StrideB;

   /* If the source stride is zero, we just want to upload the current
    * attribute once and set the buffer's stride to 0.  There's no need
    * to replicate it out.
    */
   if (src_stride == 0) {
      intel_upload_data(brw, element->glarray->Ptr,
                        element->glarray->_ElementSize,
                        element->glarray->_ElementSize,
                        &buffer->bo, &buffer->offset);

      buffer->stride = 0;
      buffer->size = element->glarray->_ElementSize;
      return;
   }

   const unsigned char *src = element->glarray->Ptr + min * src_stride;
   int count = max - min + 1;
   GLuint size = count * dst_stride;
   uint8_t *dst = intel_upload_space(brw, size, dst_stride,
                                     &buffer->bo, &buffer->offset);

   if (dst_stride == src_stride) {
      memcpy(dst, src, size);
   } else {
      while (count--) {
         memcpy(dst, src, dst_stride);
         src += src_stride;
         dst += dst_stride;
      }
   }
   buffer->stride = dst_stride;
   buffer->size = size;
}
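
/* Sort the enabled vertex arrays into VBO-backed arrays, whose contents the
 * hardware can reference in place, and user arrays that must be copied into
 * upload buffers first, then fill out brw->vb.buffers[] for both kinds.
 */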
static void
brw_prepare_vertices(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   /* BRW_NEW_VS_PROG_DATA */
   GLbitfield64 vs_inputs = brw->vs.prog_data->inputs_read;
   const unsigned char *ptr = NULL;
   GLuint interleaved = 0;
   unsigned int min_index = brw->vb.min_index + brw->basevertex;
   unsigned int max_index = brw->vb.max_index + brw->basevertex;
   unsigned i;
   int delta, j;

   struct brw_vertex_element *upload[VERT_ATTRIB_MAX];
   GLuint nr_uploads = 0;

   /* _NEW_POLYGON
    *
    * On gen6+, edge flags don't end up in the VUE (either in or out of the
    * VS).  Instead, they're uploaded as the last vertex element, and the data
    * is passed sideband through the fixed function units.  So, we need to
    * prepare the vertex buffer for it, but it's not present in inputs_read.
    */
   if (brw->gen >= 6 && (ctx->Polygon.FrontMode != GL_FILL ||
                         ctx->Polygon.BackMode != GL_FILL)) {
      vs_inputs |= VERT_BIT_EDGEFLAG;
   }

   if (unlikely(INTEL_DEBUG & DEBUG_VERTS))
      fprintf(stderr, "%s %d..%d\n", __func__, min_index, max_index);

   /* Accumulate the list of enabled arrays. */
   brw->vb.nr_enabled = 0;
   while (vs_inputs) {
      GLuint index = ffsll(vs_inputs) - 1;
      struct brw_vertex_element *input = &brw->vb.inputs[index];

      vs_inputs &= ~BITFIELD64_BIT(index);
      brw->vb.enabled[brw->vb.nr_enabled++] = input;
   }

   if (brw->vb.nr_enabled == 0)
      return;

   if (brw->vb.nr_buffers)
      return;

   /* The range of data in a given buffer represented as [min, max) */
   struct intel_buffer_object *enabled_buffer[VERT_ATTRIB_MAX];
   uint32_t buffer_range_start[VERT_ATTRIB_MAX];
   uint32_t buffer_range_end[VERT_ATTRIB_MAX];

   for (i = j = 0; i < brw->vb.nr_enabled; i++) {
      struct brw_vertex_element *input = brw->vb.enabled[i];
      const struct gl_client_array *glarray = input->glarray;

      if (_mesa_is_bufferobj(glarray->BufferObj)) {
         struct intel_buffer_object *intel_buffer =
            intel_buffer_object(glarray->BufferObj);

         const uint32_t offset = (uintptr_t)glarray->Ptr;

         /* Start with the worst case */
         uint32_t start = 0;
         uint32_t range = intel_buffer->Base.Size;
         if (glarray->InstanceDivisor) {
            if (brw->num_instances) {
               start = offset + glarray->StrideB * brw->baseinstance;
               range = (glarray->StrideB * ((brw->num_instances - 1) /
                                            glarray->InstanceDivisor) +
                        glarray->_ElementSize);
            }
         } else {
            if (brw->vb.index_bounds_valid) {
               start = offset + min_index * glarray->StrideB;
               range = (glarray->StrideB * (max_index - min_index) +
                        glarray->_ElementSize);
            }
         }

         /* If we have a VB set to be uploaded for this buffer object
          * already, reuse that VB state so that we emit fewer
          * relocations.
          */
         unsigned k;
         for (k = 0; k < i; k++) {
            const struct gl_client_array *other = brw->vb.enabled[k]->glarray;
            if (glarray->BufferObj == other->BufferObj &&
                glarray->StrideB == other->StrideB &&
                glarray->InstanceDivisor == other->InstanceDivisor &&
                (uintptr_t)(glarray->Ptr - other->Ptr) < glarray->StrideB)
            {
               input->buffer = brw->vb.enabled[k]->buffer;
               input->offset = glarray->Ptr - other->Ptr;

               buffer_range_start[input->buffer] =
                  MIN2(buffer_range_start[input->buffer], start);
               buffer_range_end[input->buffer] =
                  MAX2(buffer_range_end[input->buffer], start + range);
               break;
            }
         }
         if (k == i) {
            struct brw_vertex_buffer *buffer = &brw->vb.buffers[j];

            /* Named buffer object: Just reference its contents directly. */
            buffer->offset = offset;
            buffer->stride = glarray->StrideB;
            buffer->step_rate = glarray->InstanceDivisor;
            buffer->size = glarray->BufferObj->Size - offset;

            enabled_buffer[j] = intel_buffer;
            buffer_range_start[j] = start;
            buffer_range_end[j] = start + range;

            input->buffer = j++;
            input->offset = 0;
         }

         /* This is a common place to reach if the user mistakenly supplies
          * a pointer in place of a VBO offset.  If we just let it go through,
          * we may end up dereferencing a pointer beyond the bounds of the
          * pointer's purported memory area.
          *
          * The VBO spec allows application termination in this case, and it's
          * probably a service to the poor programmer to do so rather than
          * trying to just not render.
          */
         assert(input->offset < intel_buffer->Base.Size);
      } else {
         /* Queue the buffer object up to be uploaded in the next pass,
          * when we've decided if we're doing interleaved or not.
          */
         if (nr_uploads == 0) {
            interleaved = glarray->StrideB;
            ptr = glarray->Ptr;
         }
         else if (interleaved != glarray->StrideB ||
                  glarray->Ptr < ptr ||
                  (uintptr_t)(glarray->Ptr - ptr) + glarray->_ElementSize > interleaved)
         {
            /* If our stride is different from the first attribute's stride,
             * or if the first attribute's stride didn't cover our element,
             * disable the interleaved upload optimization.  The second case
             * can most commonly occur in cases where there is a single vertex
             * and, for example, the data is stored on the application's
             * stack.
             *
             * NOTE: This will also disable the optimization in cases where
             * the data is in a different order than the array indices.
             * Something like:
             *
             *       float data[...];
             *       glVertexAttribPointer(0, 4, GL_FLOAT, 32, &data[4]);
             *       glVertexAttribPointer(1, 4, GL_FLOAT, 32, &data[0]);
             */
            interleaved = 0;
         }

         upload[nr_uploads++] = input;
      }
   }

   /* Now that we've set up all of the buffers, we walk through and reference
    * each of them.  We do this late so that we get the right size in each
    * buffer and don't reference too little data.
    */
   for (i = 0; i < j; i++) {
      struct brw_vertex_buffer *buffer = &brw->vb.buffers[i];
      if (buffer->bo)
         continue;

      const uint32_t start = buffer_range_start[i];
      const uint32_t range = buffer_range_end[i] - buffer_range_start[i];

      buffer->bo = intel_bufferobj_buffer(brw, enabled_buffer[i], start, range);
      drm_intel_bo_reference(buffer->bo);
   }

   /* If we need to upload all the arrays, then we can trim those arrays to
    * only the used elements [min_index, max_index] so long as we adjust all
    * the values used in the 3DPRIMITIVE i.e. by setting the vertex bias.
    */
   brw->vb.start_vertex_bias = 0;
   delta = min_index;
   if (nr_uploads == brw->vb.nr_enabled) {
      brw->vb.start_vertex_bias = -delta;
      delta = 0;
   }

   /* Handle any arrays to be uploaded. */
   if (nr_uploads > 1) {
      if (interleaved) {
         struct brw_vertex_buffer *buffer = &brw->vb.buffers[j];
         /* All uploads are interleaved, so upload the arrays together as
          * interleaved.  First, upload the contents and set up upload[0].
          */
         copy_array_to_vbo_array(brw, upload[0], min_index, max_index,
                                 buffer, interleaved);
         buffer->offset -= delta * interleaved;
         buffer->size += delta * interleaved;

         for (i = 0; i < nr_uploads; i++) {
            /* Then, just point upload[i] at upload[0]'s buffer. */
            upload[i]->offset =
               ((const unsigned char *)upload[i]->glarray->Ptr - ptr);
            upload[i]->buffer = j;
         }
         j++;

         nr_uploads = 0;
      }
   }
   /* Upload non-interleaved arrays */
   for (i = 0; i < nr_uploads; i++) {
      struct brw_vertex_buffer *buffer = &brw->vb.buffers[j];
      if (upload[i]->glarray->InstanceDivisor == 0) {
         copy_array_to_vbo_array(brw, upload[i], min_index, max_index,
                                 buffer, upload[i]->glarray->_ElementSize);
      } else {
         /* This is an instanced attribute, since its InstanceDivisor
          * is not zero. Therefore, its data will be stepped after the
          * instanced draw has been run InstanceDivisor times.
          */
         uint32_t instanced_attr_max_index =
            (brw->num_instances - 1) / upload[i]->glarray->InstanceDivisor;
         copy_array_to_vbo_array(brw, upload[i], 0, instanced_attr_max_index,
                                 buffer, upload[i]->glarray->_ElementSize);
      }
      buffer->offset -= delta * buffer->stride;
      buffer->size += delta * buffer->stride;
      buffer->step_rate = upload[i]->glarray->InstanceDivisor;
      upload[i]->buffer = j++;
      upload[i]->offset = 0;
   }

   brw->vb.nr_buffers = j;
}
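
/* Upload the sideband values backing gl_BaseVertex/gl_BaseInstance and
 * gl_DrawID; they are fed to the shader through extra vertex buffers set
 * up in brw_emit_vertices() below.
 */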
void
brw_prepare_shader_draw_parameters(struct brw_context *brw)
{
   /* For non-indirect draws, upload gl_BaseVertex. */
   if ((brw->vs.prog_data->uses_basevertex ||
        brw->vs.prog_data->uses_baseinstance) &&
       brw->draw.draw_params_bo == NULL) {
      intel_upload_data(brw, &brw->draw.params, sizeof(brw->draw.params), 4,
                        &brw->draw.draw_params_bo,
                        &brw->draw.draw_params_offset);
   }

   if (brw->vs.prog_data->uses_drawid) {
      intel_upload_data(brw, &brw->draw.gl_drawid, sizeof(brw->draw.gl_drawid), 4,
                        &brw->draw.draw_id_bo,
                        &brw->draw.draw_id_offset);
   }
}

/**
 * Emit a VERTEX_BUFFER_STATE entry (part of 3DSTATE_VERTEX_BUFFERS).
 */
static uint32_t *
emit_vertex_buffer_state(struct brw_context *brw,
                         unsigned buffer_nr,
                         drm_intel_bo *bo,
                         unsigned bo_ending_address,
                         unsigned bo_offset,
                         unsigned stride,
                         unsigned step_rate,
                         uint32_t *__map)
{
   struct gl_context *ctx = &brw->ctx;
   uint32_t dw0;

   if (brw->gen >= 6) {
      dw0 = (buffer_nr << GEN6_VB0_INDEX_SHIFT) |
            (step_rate ? GEN6_VB0_ACCESS_INSTANCEDATA
                       : GEN6_VB0_ACCESS_VERTEXDATA);
   } else {
      dw0 = (buffer_nr << BRW_VB0_INDEX_SHIFT) |
            (step_rate ? BRW_VB0_ACCESS_INSTANCEDATA
                       : BRW_VB0_ACCESS_VERTEXDATA);
   }

   if (brw->gen >= 7)
      dw0 |= GEN7_VB0_ADDRESS_MODIFYENABLE;

   if (brw->gen == 7)
      dw0 |= GEN7_MOCS_L3 << 16;

   WARN_ONCE(stride >= (brw->gen >= 5 ? 2048 : 2047),
             "VBO stride %d too large, bad rendering may occur\n",
             stride);
   OUT_BATCH(dw0 | (stride << BRW_VB0_PITCH_SHIFT));
   OUT_RELOC(bo, I915_GEM_DOMAIN_VERTEX, 0, bo_offset);
   if (brw->gen >= 5) {
      OUT_RELOC(bo, I915_GEM_DOMAIN_VERTEX, 0, bo_ending_address);
   } else {
      OUT_BATCH(0);
   }
   OUT_BATCH(step_rate);

   return __map;
}
#define EMIT_VERTEX_BUFFER_STATE(...) __map = emit_vertex_buffer_state(__VA_ARGS__, __map)
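
/* In this driver, BEGIN_BATCH() opens a local __map cursor that OUT_BATCH()
 * writes through, so a helper that emits batch dwords has to receive that
 * cursor and return its advanced value; this wrapper threads __map in and
 * out of the helper call.
 */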

static void
brw_emit_vertices(struct brw_context *brw)
{
   GLuint i;

   brw_prepare_vertices(brw);
   brw_prepare_shader_draw_parameters(brw);

   brw_emit_query_begin(brw);

   unsigned nr_elements = brw->vb.nr_enabled;
   if (brw->vs.prog_data->uses_vertexid || brw->vs.prog_data->uses_instanceid ||
       brw->vs.prog_data->uses_basevertex || brw->vs.prog_data->uses_baseinstance)
      nr_elements++;
   if (brw->vs.prog_data->uses_drawid)
      nr_elements++;

   /* If the VS doesn't read any inputs (calculating vertex position from
    * a state variable for some reason, for example), emit a single pad
    * VERTEX_ELEMENT struct and bail.
    *
    * The stale VB state stays in place, but the VBs don't do anything unless
    * a VE loads from them.
    */
   if (nr_elements == 0) {
      BEGIN_BATCH(3);
      OUT_BATCH((_3DSTATE_VERTEX_ELEMENTS << 16) | 1);
      if (brw->gen >= 6) {
         OUT_BATCH((0 << GEN6_VE0_INDEX_SHIFT) |
                   GEN6_VE0_VALID |
                   (BRW_SURFACEFORMAT_R32G32B32A32_FLOAT << BRW_VE0_FORMAT_SHIFT) |
                   (0 << BRW_VE0_SRC_OFFSET_SHIFT));
      } else {
         OUT_BATCH((0 << BRW_VE0_INDEX_SHIFT) |
                   BRW_VE0_VALID |
                   (BRW_SURFACEFORMAT_R32G32B32A32_FLOAT << BRW_VE0_FORMAT_SHIFT) |
                   (0 << BRW_VE0_SRC_OFFSET_SHIFT));
      }
      OUT_BATCH((BRW_VE1_COMPONENT_STORE_0 << BRW_VE1_COMPONENT_0_SHIFT) |
                (BRW_VE1_COMPONENT_STORE_0 << BRW_VE1_COMPONENT_1_SHIFT) |
                (BRW_VE1_COMPONENT_STORE_0 << BRW_VE1_COMPONENT_2_SHIFT) |
                (BRW_VE1_COMPONENT_STORE_1_FLT << BRW_VE1_COMPONENT_3_SHIFT));
      ADVANCE_BATCH();
      return;
   }

   /* Now emit VB and VEP state packets.
    */

   const bool uses_draw_params =
      brw->vs.prog_data->uses_basevertex ||
      brw->vs.prog_data->uses_baseinstance;
   const unsigned nr_buffers = brw->vb.nr_buffers +
      uses_draw_params + brw->vs.prog_data->uses_drawid;

   if (nr_buffers) {
      if (brw->gen >= 6) {
         assert(nr_buffers <= 33);
      } else {
         assert(nr_buffers <= 17);
      }

      BEGIN_BATCH(1 + 4 * nr_buffers);
      OUT_BATCH((_3DSTATE_VERTEX_BUFFERS << 16) | (4 * nr_buffers - 1));
      for (i = 0; i < brw->vb.nr_buffers; i++) {
         struct brw_vertex_buffer *buffer = &brw->vb.buffers[i];
         /* Prior to Haswell and Bay Trail we have to use 4-component formats
          * to fake 3-component ones.  In particular, we do this for
          * half-float and 8 and 16-bit integer formats.  This means that the
          * vertex element may poke over the end of the buffer by 2 bytes.
          */
         const unsigned padding =
            (brw->gen <= 7 && !brw->is_baytrail && !brw->is_haswell) * 2;
         EMIT_VERTEX_BUFFER_STATE(brw, i, buffer->bo,
                                  buffer->offset + buffer->size + padding - 1,
                                  buffer->offset, buffer->stride,
                                  buffer->step_rate);
      }

      if (uses_draw_params) {
         EMIT_VERTEX_BUFFER_STATE(brw, brw->vb.nr_buffers,
                                  brw->draw.draw_params_bo,
                                  brw->draw.draw_params_bo->size - 1,
                                  brw->draw.draw_params_offset,
                                  0 /* stride */,
                                  0 /* step rate */);
      }

      if (brw->vs.prog_data->uses_drawid) {
         EMIT_VERTEX_BUFFER_STATE(brw, brw->vb.nr_buffers + 1,
                                  brw->draw.draw_id_bo,
                                  brw->draw.draw_id_bo->size - 1,
                                  brw->draw.draw_id_offset,
                                  0 /* stride */,
                                  0 /* step rate */);
      }

      ADVANCE_BATCH();
   }

   /* The hardware allows one more VERTEX_ELEMENTS than VERTEX_BUFFERS,
    * presumably for VertexID/InstanceID.
    */
   if (brw->gen >= 6) {
      assert(nr_elements <= 34);
   } else {
      assert(nr_elements <= 18);
   }

   struct brw_vertex_element *gen6_edgeflag_input = NULL;

   BEGIN_BATCH(1 + nr_elements * 2);
   OUT_BATCH((_3DSTATE_VERTEX_ELEMENTS << 16) | (2 * nr_elements - 1));
   for (i = 0; i < brw->vb.nr_enabled; i++) {
      struct brw_vertex_element *input = brw->vb.enabled[i];
      uint32_t format = brw_get_vertex_surface_type(brw, input->glarray);
      uint32_t comp0 = BRW_VE1_COMPONENT_STORE_SRC;
      uint32_t comp1 = BRW_VE1_COMPONENT_STORE_SRC;
      uint32_t comp2 = BRW_VE1_COMPONENT_STORE_SRC;
      uint32_t comp3 = BRW_VE1_COMPONENT_STORE_SRC;

      if (input == &brw->vb.inputs[VERT_ATTRIB_EDGEFLAG]) {
         /* Gen6+ passes edgeflag as sideband along with the vertex, instead
          * of in the VUE.  We have to upload it sideband as the last vertex
          * element according to the B-Spec.
          */
         if (brw->gen >= 6) {
            gen6_edgeflag_input = input;
            continue;
         }
      }

      /* Deliberate case fall-through: zero-fill the components the array
       * doesn't supply, and store 1 (int or float) in the w component.
       */
      switch (input->glarray->Size) {
      case 0: comp0 = BRW_VE1_COMPONENT_STORE_0;
      case 1: comp1 = BRW_VE1_COMPONENT_STORE_0;
      case 2: comp2 = BRW_VE1_COMPONENT_STORE_0;
      case 3: comp3 = input->glarray->Integer ? BRW_VE1_COMPONENT_STORE_1_INT
                                              : BRW_VE1_COMPONENT_STORE_1_FLT;
         break;
      }

      if (brw->gen >= 6) {
         OUT_BATCH((input->buffer << GEN6_VE0_INDEX_SHIFT) |
                   GEN6_VE0_VALID |
                   (format << BRW_VE0_FORMAT_SHIFT) |
                   (input->offset << BRW_VE0_SRC_OFFSET_SHIFT));
      } else {
         OUT_BATCH((input->buffer << BRW_VE0_INDEX_SHIFT) |
                   BRW_VE0_VALID |
                   (format << BRW_VE0_FORMAT_SHIFT) |
                   (input->offset << BRW_VE0_SRC_OFFSET_SHIFT));
      }

      if (brw->gen >= 5)
         OUT_BATCH((comp0 << BRW_VE1_COMPONENT_0_SHIFT) |
                   (comp1 << BRW_VE1_COMPONENT_1_SHIFT) |
                   (comp2 << BRW_VE1_COMPONENT_2_SHIFT) |
                   (comp3 << BRW_VE1_COMPONENT_3_SHIFT));
      else
         OUT_BATCH((comp0 << BRW_VE1_COMPONENT_0_SHIFT) |
                   (comp1 << BRW_VE1_COMPONENT_1_SHIFT) |
                   (comp2 << BRW_VE1_COMPONENT_2_SHIFT) |
                   (comp3 << BRW_VE1_COMPONENT_3_SHIFT) |
                   ((i * 4) << BRW_VE1_DST_OFFSET_SHIFT));
   }

   if (brw->vs.prog_data->uses_vertexid || brw->vs.prog_data->uses_instanceid ||
       brw->vs.prog_data->uses_basevertex || brw->vs.prog_data->uses_baseinstance) {
      uint32_t dw0 = 0, dw1 = 0;
      uint32_t comp0 = BRW_VE1_COMPONENT_STORE_0;
      uint32_t comp1 = BRW_VE1_COMPONENT_STORE_0;
      uint32_t comp2 = BRW_VE1_COMPONENT_STORE_0;
      uint32_t comp3 = BRW_VE1_COMPONENT_STORE_0;

      if (brw->vs.prog_data->uses_basevertex)
         comp0 = BRW_VE1_COMPONENT_STORE_SRC;

      if (brw->vs.prog_data->uses_baseinstance)
         comp1 = BRW_VE1_COMPONENT_STORE_SRC;

      if (brw->vs.prog_data->uses_vertexid)
         comp2 = BRW_VE1_COMPONENT_STORE_VID;

      if (brw->vs.prog_data->uses_instanceid)
         comp3 = BRW_VE1_COMPONENT_STORE_IID;

      dw1 = (comp0 << BRW_VE1_COMPONENT_0_SHIFT) |
            (comp1 << BRW_VE1_COMPONENT_1_SHIFT) |
            (comp2 << BRW_VE1_COMPONENT_2_SHIFT) |
            (comp3 << BRW_VE1_COMPONENT_3_SHIFT);

      if (brw->gen >= 6) {
         dw0 |= GEN6_VE0_VALID |
                brw->vb.nr_buffers << GEN6_VE0_INDEX_SHIFT |
                BRW_SURFACEFORMAT_R32G32_UINT << BRW_VE0_FORMAT_SHIFT;
      } else {
         dw0 |= BRW_VE0_VALID |
                brw->vb.nr_buffers << BRW_VE0_INDEX_SHIFT |
                BRW_SURFACEFORMAT_R32G32_UINT << BRW_VE0_FORMAT_SHIFT;
         dw1 |= (i * 4) << BRW_VE1_DST_OFFSET_SHIFT;
      }

      /* Note that for gl_VertexID, gl_InstanceID, and gl_PrimitiveID values,
       * the format is ignored and the value is always int.
       */
      OUT_BATCH(dw0);
      OUT_BATCH(dw1);
   }

   if (brw->vs.prog_data->uses_drawid) {
      uint32_t dw0 = 0, dw1 = 0;

      dw1 = (BRW_VE1_COMPONENT_STORE_SRC << BRW_VE1_COMPONENT_0_SHIFT) |
            (BRW_VE1_COMPONENT_STORE_0 << BRW_VE1_COMPONENT_1_SHIFT) |
            (BRW_VE1_COMPONENT_STORE_0 << BRW_VE1_COMPONENT_2_SHIFT) |
            (BRW_VE1_COMPONENT_STORE_0 << BRW_VE1_COMPONENT_3_SHIFT);

      if (brw->gen >= 6) {
         dw0 |= GEN6_VE0_VALID |
                ((brw->vb.nr_buffers + 1) << GEN6_VE0_INDEX_SHIFT) |
                (BRW_SURFACEFORMAT_R32_UINT << BRW_VE0_FORMAT_SHIFT);
      } else {
         dw0 |= BRW_VE0_VALID |
                ((brw->vb.nr_buffers + 1) << BRW_VE0_INDEX_SHIFT) |
                (BRW_SURFACEFORMAT_R32_UINT << BRW_VE0_FORMAT_SHIFT);

         dw1 |= (i * 4) << BRW_VE1_DST_OFFSET_SHIFT;
      }

      OUT_BATCH(dw0);
      OUT_BATCH(dw1);
   }

   if (brw->gen >= 6 && gen6_edgeflag_input) {
      uint32_t format =
         brw_get_vertex_surface_type(brw, gen6_edgeflag_input->glarray);

      OUT_BATCH((gen6_edgeflag_input->buffer << GEN6_VE0_INDEX_SHIFT) |
                GEN6_VE0_VALID |
                GEN6_VE0_EDGE_FLAG_ENABLE |
                (format << BRW_VE0_FORMAT_SHIFT) |
                (gen6_edgeflag_input->offset << BRW_VE0_SRC_OFFSET_SHIFT));
      OUT_BATCH((BRW_VE1_COMPONENT_STORE_SRC << BRW_VE1_COMPONENT_0_SHIFT) |
                (BRW_VE1_COMPONENT_STORE_0 << BRW_VE1_COMPONENT_1_SHIFT) |
                (BRW_VE1_COMPONENT_STORE_0 << BRW_VE1_COMPONENT_2_SHIFT) |
                (BRW_VE1_COMPONENT_STORE_0 << BRW_VE1_COMPONENT_3_SHIFT));
   }

   ADVANCE_BATCH();
}

const struct brw_tracked_state brw_vertices = {
   .dirty = {
      .mesa = _NEW_POLYGON,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_BLORP |
             BRW_NEW_VERTICES |
             BRW_NEW_VS_PROG_DATA,
   },
   .emit = brw_emit_vertices,
};
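
/* Get the index data somewhere the hardware can see it: reference an index
 * VBO directly when possible, or copy user-pointer (or misaligned) index
 * data into an upload buffer first.
 */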
static void
brw_upload_indices(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   const struct _mesa_index_buffer *index_buffer = brw->ib.ib;
   GLuint ib_size;
   drm_intel_bo *old_bo = brw->ib.bo;
   struct gl_buffer_object *bufferobj;
   GLuint offset;
   GLuint ib_type_size;

   if (index_buffer == NULL)
      return;

   ib_type_size = _mesa_sizeof_type(index_buffer->type);
   ib_size = index_buffer->count ? ib_type_size * index_buffer->count :
                                   index_buffer->obj->Size;
   bufferobj = index_buffer->obj;

   /* Turn into a proper VBO:
    */
   if (!_mesa_is_bufferobj(bufferobj)) {
      /* Get new bufferobj, offset:
       */
      intel_upload_data(brw, index_buffer->ptr, ib_size, ib_type_size,
                        &brw->ib.bo, &offset);
      brw->ib.size = brw->ib.bo->size;
   } else {
      offset = (GLuint) (unsigned long) index_buffer->ptr;

      /* If the index buffer isn't aligned to its element size, we have to
       * rebase it into a temporary.
       */
      if ((ib_type_size - 1) & offset) {
         perf_debug("copying index buffer to a temporary to work around "
                    "misaligned offset %d\n", offset);

         GLubyte *map = ctx->Driver.MapBufferRange(ctx,
                                                   offset, ib_size,
                                                   GL_MAP_READ_BIT,
                                                   bufferobj,
                                                   MAP_INTERNAL);

         intel_upload_data(brw, map, ib_size, ib_type_size,
                           &brw->ib.bo, &offset);
         brw->ib.size = brw->ib.bo->size;

         ctx->Driver.UnmapBuffer(ctx, bufferobj, MAP_INTERNAL);
      } else {
         drm_intel_bo *bo =
            intel_bufferobj_buffer(brw, intel_buffer_object(bufferobj),
                                   offset, ib_size);
         if (bo != brw->ib.bo) {
            drm_intel_bo_unreference(brw->ib.bo);
            brw->ib.bo = bo;
            brw->ib.size = bufferobj->Size;
            drm_intel_bo_reference(bo);
         }
      }
   }

   /* Use 3DPRIMITIVE's start_vertex_offset to avoid re-uploading
    * the index buffer state when we're just moving the start index
    * of our drawing.
    */
   brw->ib.start_vertex_offset = offset / ib_type_size;

   if (brw->ib.bo != old_bo)
      brw->ctx.NewDriverState |= BRW_NEW_INDEX_BUFFER;

   if (index_buffer->type != brw->ib.type) {
      brw->ib.type = index_buffer->type;
      brw->ctx.NewDriverState |= BRW_NEW_INDEX_BUFFER;
   }
}

const struct brw_tracked_state brw_indices = {
   .dirty = {
      .mesa = 0,
      .brw = BRW_NEW_BLORP |
             BRW_NEW_INDICES,
   },
   .emit = brw_upload_indices,
};

static void
brw_emit_index_buffer(struct brw_context *brw)
{
   const struct _mesa_index_buffer *index_buffer = brw->ib.ib;
   GLuint cut_index_setting;

   if (index_buffer == NULL)
      return;

   if (brw->prim_restart.enable_cut_index && !brw->is_haswell) {
      cut_index_setting = BRW_CUT_INDEX_ENABLE;
   } else {
      cut_index_setting = 0;
   }

   BEGIN_BATCH(3);
   OUT_BATCH(CMD_INDEX_BUFFER << 16 |
             cut_index_setting |
             brw_get_index_type(index_buffer->type) |
             1);
   OUT_RELOC(brw->ib.bo,
             I915_GEM_DOMAIN_VERTEX, 0,
             0);
   OUT_RELOC(brw->ib.bo,
             I915_GEM_DOMAIN_VERTEX, 0,
             brw->ib.size - 1);
   ADVANCE_BATCH();
}

const struct brw_tracked_state brw_index_buffer = {
   .dirty = {
      .mesa = 0,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_BLORP |
             BRW_NEW_INDEX_BUFFER,
   },
   .emit = brw_emit_index_buffer,
};