/**************************************************************************
 *
 * Copyright 2003 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "main/bufferobj.h"
#include "main/context.h"
#include "main/enums.h"
#include "main/macros.h"
#include "main/glformats.h"

#include "brw_draw.h"
#include "brw_defines.h"
#include "brw_context.h"
#include "brw_state.h"

#include "intel_batchbuffer.h"
#include "intel_buffer_objects.h"
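
/* Lookup tables that map a GL vertex component count to a hardware surface
 * format. Entry 0 is unused padding so that each table can be indexed
 * directly by the array size (1..4).
 */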
static const GLuint double_types_float[5] = {
   0,
   BRW_SURFACEFORMAT_R64_FLOAT,
   BRW_SURFACEFORMAT_R64G64_FLOAT,
   BRW_SURFACEFORMAT_R64G64B64_FLOAT,
   BRW_SURFACEFORMAT_R64G64B64A64_FLOAT
};

static const GLuint double_types_passthru[5] = {
   0,
   BRW_SURFACEFORMAT_R64_PASSTHRU,
   BRW_SURFACEFORMAT_R64G64_PASSTHRU,
   BRW_SURFACEFORMAT_R64G64B64_PASSTHRU,
   BRW_SURFACEFORMAT_R64G64B64A64_PASSTHRU
};

static const GLuint float_types[5] = {
   0,
   BRW_SURFACEFORMAT_R32_FLOAT,
   BRW_SURFACEFORMAT_R32G32_FLOAT,
   BRW_SURFACEFORMAT_R32G32B32_FLOAT,
   BRW_SURFACEFORMAT_R32G32B32A32_FLOAT
};

static const GLuint half_float_types[5] = {
   0,
   BRW_SURFACEFORMAT_R16_FLOAT,
   BRW_SURFACEFORMAT_R16G16_FLOAT,
   BRW_SURFACEFORMAT_R16G16B16_FLOAT,
   BRW_SURFACEFORMAT_R16G16B16A16_FLOAT
};

static const GLuint fixed_point_types[5] = {
   0,
   BRW_SURFACEFORMAT_R32_SFIXED,
   BRW_SURFACEFORMAT_R32G32_SFIXED,
   BRW_SURFACEFORMAT_R32G32B32_SFIXED,
   BRW_SURFACEFORMAT_R32G32B32A32_SFIXED,
};

static const GLuint uint_types_direct[5] = {
   0,
   BRW_SURFACEFORMAT_R32_UINT,
   BRW_SURFACEFORMAT_R32G32_UINT,
   BRW_SURFACEFORMAT_R32G32B32_UINT,
   BRW_SURFACEFORMAT_R32G32B32A32_UINT
};

static const GLuint uint_types_norm[5] = {
   0,
   BRW_SURFACEFORMAT_R32_UNORM,
   BRW_SURFACEFORMAT_R32G32_UNORM,
   BRW_SURFACEFORMAT_R32G32B32_UNORM,
   BRW_SURFACEFORMAT_R32G32B32A32_UNORM
};

static const GLuint uint_types_scale[5] = {
   0,
   BRW_SURFACEFORMAT_R32_USCALED,
   BRW_SURFACEFORMAT_R32G32_USCALED,
   BRW_SURFACEFORMAT_R32G32B32_USCALED,
   BRW_SURFACEFORMAT_R32G32B32A32_USCALED
};

static const GLuint int_types_direct[5] = {
   0,
   BRW_SURFACEFORMAT_R32_SINT,
   BRW_SURFACEFORMAT_R32G32_SINT,
   BRW_SURFACEFORMAT_R32G32B32_SINT,
   BRW_SURFACEFORMAT_R32G32B32A32_SINT
};

static const GLuint int_types_norm[5] = {
   0,
   BRW_SURFACEFORMAT_R32_SNORM,
   BRW_SURFACEFORMAT_R32G32_SNORM,
   BRW_SURFACEFORMAT_R32G32B32_SNORM,
   BRW_SURFACEFORMAT_R32G32B32A32_SNORM
};

static const GLuint int_types_scale[5] = {
   0,
   BRW_SURFACEFORMAT_R32_SSCALED,
   BRW_SURFACEFORMAT_R32G32_SSCALED,
   BRW_SURFACEFORMAT_R32G32B32_SSCALED,
   BRW_SURFACEFORMAT_R32G32B32A32_SSCALED
};

static const GLuint ushort_types_direct[5] = {
   0,
   BRW_SURFACEFORMAT_R16_UINT,
   BRW_SURFACEFORMAT_R16G16_UINT,
   BRW_SURFACEFORMAT_R16G16B16_UINT,
   BRW_SURFACEFORMAT_R16G16B16A16_UINT
};

static const GLuint ushort_types_norm[5] = {
   0,
   BRW_SURFACEFORMAT_R16_UNORM,
   BRW_SURFACEFORMAT_R16G16_UNORM,
   BRW_SURFACEFORMAT_R16G16B16_UNORM,
   BRW_SURFACEFORMAT_R16G16B16A16_UNORM
};

static const GLuint ushort_types_scale[5] = {
   0,
   BRW_SURFACEFORMAT_R16_USCALED,
   BRW_SURFACEFORMAT_R16G16_USCALED,
   BRW_SURFACEFORMAT_R16G16B16_USCALED,
   BRW_SURFACEFORMAT_R16G16B16A16_USCALED
};

static const GLuint short_types_direct[5] = {
   0,
   BRW_SURFACEFORMAT_R16_SINT,
   BRW_SURFACEFORMAT_R16G16_SINT,
   BRW_SURFACEFORMAT_R16G16B16_SINT,
   BRW_SURFACEFORMAT_R16G16B16A16_SINT
};

static const GLuint short_types_norm[5] = {
   0,
   BRW_SURFACEFORMAT_R16_SNORM,
   BRW_SURFACEFORMAT_R16G16_SNORM,
   BRW_SURFACEFORMAT_R16G16B16_SNORM,
   BRW_SURFACEFORMAT_R16G16B16A16_SNORM
};

static const GLuint short_types_scale[5] = {
   0,
   BRW_SURFACEFORMAT_R16_SSCALED,
   BRW_SURFACEFORMAT_R16G16_SSCALED,
   BRW_SURFACEFORMAT_R16G16B16_SSCALED,
   BRW_SURFACEFORMAT_R16G16B16A16_SSCALED
};

static const GLuint ubyte_types_direct[5] = {
   0,
   BRW_SURFACEFORMAT_R8_UINT,
   BRW_SURFACEFORMAT_R8G8_UINT,
   BRW_SURFACEFORMAT_R8G8B8_UINT,
   BRW_SURFACEFORMAT_R8G8B8A8_UINT
};

static const GLuint ubyte_types_norm[5] = {
   0,
   BRW_SURFACEFORMAT_R8_UNORM,
   BRW_SURFACEFORMAT_R8G8_UNORM,
   BRW_SURFACEFORMAT_R8G8B8_UNORM,
   BRW_SURFACEFORMAT_R8G8B8A8_UNORM
};

static const GLuint ubyte_types_scale[5] = {
   0,
   BRW_SURFACEFORMAT_R8_USCALED,
   BRW_SURFACEFORMAT_R8G8_USCALED,
   BRW_SURFACEFORMAT_R8G8B8_USCALED,
   BRW_SURFACEFORMAT_R8G8B8A8_USCALED
};

static const GLuint byte_types_direct[5] = {
   0,
   BRW_SURFACEFORMAT_R8_SINT,
   BRW_SURFACEFORMAT_R8G8_SINT,
   BRW_SURFACEFORMAT_R8G8B8_SINT,
   BRW_SURFACEFORMAT_R8G8B8A8_SINT
};

static const GLuint byte_types_norm[5] = {
   0,
   BRW_SURFACEFORMAT_R8_SNORM,
   BRW_SURFACEFORMAT_R8G8_SNORM,
   BRW_SURFACEFORMAT_R8G8B8_SNORM,
   BRW_SURFACEFORMAT_R8G8B8A8_SNORM
};

static const GLuint byte_types_scale[5] = {
   0,
   BRW_SURFACEFORMAT_R8_SSCALED,
   BRW_SURFACEFORMAT_R8G8_SSCALED,
   BRW_SURFACEFORMAT_R8G8B8_SSCALED,
   BRW_SURFACEFORMAT_R8G8B8A8_SSCALED
};
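
/* Choose between the PASSTHRU (true 64-bit) and the down-converted float
 * surface formats for double-precision attributes. The "doubles" flag is
 * gl_vertex_array::Doubles, i.e. whether the attribute was specified as a
 * real double (glVertexAttribL*) rather than a value converted to float.
 */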
static GLuint
double_types(struct brw_context *brw,
             int size,
             GLboolean doubles)
{
   /* From the BDW PRM, Volume 2d, page 588 (VERTEX_ELEMENT_STATE):
    * "When SourceElementFormat is set to one of the *64*_PASSTHRU formats,
    * 64-bit components are stored in the URB without any conversion."
    * Also included on BDW PRM, Volume 7, page 470, table "Source Element
    * Formats Supported in VF Unit"
    *
    * Previous PRMs don't include those references, so for gen7 we can't use
    * PASSTHRU formats directly. But in any case, we prefer to return passthru
    * even in that case, because that reflects what we want to achieve, even
    * if we would need to work around it on gen < 8.
    */
   return (doubles
           ? double_types_passthru[size]
           : double_types_float[size]);
}

/**
 * Given vertex array type/size/format/normalized info, return
 * the appropriate hardware surface type.
 * Format will be GL_RGBA or possibly GL_BGRA for GLubyte[4] color arrays.
 */
unsigned
brw_get_vertex_surface_type(struct brw_context *brw,
                            const struct gl_vertex_array *glarray)
{
   int size = glarray->Size;
   const bool is_ivybridge_or_older =
      brw->gen <= 7 && !brw->is_baytrail && !brw->is_haswell;

   if (unlikely(INTEL_DEBUG & DEBUG_VERTS))
      fprintf(stderr, "type %s size %d normalized %d\n",
              _mesa_enum_to_string(glarray->Type),
              glarray->Size, glarray->Normalized);
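
   /* Note: several of the 3-component cases below are promoted to the
    * matching 4-component format on hardware that can't fetch them
    * natively; the 2-byte buffer padding that covers the resulting
    * over-fetch is added in brw_emit_vertices().
    */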
   if (glarray->Integer) {
      assert(glarray->Format == GL_RGBA); /* sanity check */
      switch (glarray->Type) {
      case GL_INT: return int_types_direct[size];
      case GL_SHORT:
         if (is_ivybridge_or_older && size == 3)
            return short_types_direct[4];
         else
            return short_types_direct[size];
      case GL_BYTE:
         if (is_ivybridge_or_older && size == 3)
            return byte_types_direct[4];
         else
            return byte_types_direct[size];
      case GL_UNSIGNED_INT: return uint_types_direct[size];
      case GL_UNSIGNED_SHORT:
         if (is_ivybridge_or_older && size == 3)
            return ushort_types_direct[4];
         else
            return ushort_types_direct[size];
      case GL_UNSIGNED_BYTE:
         if (is_ivybridge_or_older && size == 3)
            return ubyte_types_direct[4];
         else
            return ubyte_types_direct[size];
      default: unreachable("not reached");
      }
   } else if (glarray->Type == GL_UNSIGNED_INT_10F_11F_11F_REV) {
      return BRW_SURFACEFORMAT_R11G11B10_FLOAT;
   } else if (glarray->Normalized) {
      switch (glarray->Type) {
      case GL_DOUBLE: return double_types(brw, size, glarray->Doubles);
      case GL_FLOAT: return float_types[size];
      case GL_HALF_FLOAT:
      case GL_HALF_FLOAT_OES:
         if (brw->gen < 6 && size == 3)
            return half_float_types[4];
         else
            return half_float_types[size];
      case GL_INT: return int_types_norm[size];
      case GL_SHORT: return short_types_norm[size];
      case GL_BYTE: return byte_types_norm[size];
      case GL_UNSIGNED_INT: return uint_types_norm[size];
      case GL_UNSIGNED_SHORT: return ushort_types_norm[size];
      case GL_UNSIGNED_BYTE:
         if (glarray->Format == GL_BGRA) {
            /* See GL_EXT_vertex_array_bgra */
            assert(size == 4);
            return BRW_SURFACEFORMAT_B8G8R8A8_UNORM;
         }
         else {
            return ubyte_types_norm[size];
         }
      case GL_FIXED:
         if (brw->gen >= 8 || brw->is_haswell)
            return fixed_point_types[size];

         /* This produces GL_FIXED inputs as values between INT32_MIN and
          * INT32_MAX, which will be scaled down by 1/65536 by the VS.
          */
         return int_types_scale[size];
      /* See GL_ARB_vertex_type_2_10_10_10_rev.
       * W/A: Pre-Haswell, the hardware doesn't really support the formats
       * we'd like to use here, so upload everything as UINT and fix
       * it in the shader.
       */
      case GL_INT_2_10_10_10_REV:
         assert(size == 4);
         if (brw->gen >= 8 || brw->is_haswell) {
            return glarray->Format == GL_BGRA
               ? BRW_SURFACEFORMAT_B10G10R10A2_SNORM
               : BRW_SURFACEFORMAT_R10G10B10A2_SNORM;
         }
         return BRW_SURFACEFORMAT_R10G10B10A2_UINT;
      case GL_UNSIGNED_INT_2_10_10_10_REV:
         assert(size == 4);
         if (brw->gen >= 8 || brw->is_haswell) {
            return glarray->Format == GL_BGRA
               ? BRW_SURFACEFORMAT_B10G10R10A2_UNORM
               : BRW_SURFACEFORMAT_R10G10B10A2_UNORM;
         }
         return BRW_SURFACEFORMAT_R10G10B10A2_UINT;
      default: unreachable("not reached");
      }
   }
   else {
      /* See GL_ARB_vertex_type_2_10_10_10_rev.
       * W/A: the hardware doesn't really support the formats we'd
       * like to use here, so upload everything as UINT and fix
       * it in the shader.
       */
      if (glarray->Type == GL_INT_2_10_10_10_REV) {
         assert(size == 4);
         if (brw->gen >= 8 || brw->is_haswell) {
            return glarray->Format == GL_BGRA
               ? BRW_SURFACEFORMAT_B10G10R10A2_SSCALED
               : BRW_SURFACEFORMAT_R10G10B10A2_SSCALED;
         }
         return BRW_SURFACEFORMAT_R10G10B10A2_UINT;
      } else if (glarray->Type == GL_UNSIGNED_INT_2_10_10_10_REV) {
         assert(size == 4);
         if (brw->gen >= 8 || brw->is_haswell) {
            return glarray->Format == GL_BGRA
               ? BRW_SURFACEFORMAT_B10G10R10A2_USCALED
               : BRW_SURFACEFORMAT_R10G10B10A2_USCALED;
         }
         return BRW_SURFACEFORMAT_R10G10B10A2_UINT;
      }
      assert(glarray->Format == GL_RGBA); /* sanity check */
      switch (glarray->Type) {
      case GL_DOUBLE: return double_types(brw, size, glarray->Doubles);
      case GL_FLOAT: return float_types[size];
      case GL_HALF_FLOAT:
      case GL_HALF_FLOAT_OES:
         if (brw->gen < 6 && size == 3)
            return half_float_types[4];
         else
            return half_float_types[size];
      case GL_INT: return int_types_scale[size];
      case GL_SHORT: return short_types_scale[size];
      case GL_BYTE: return byte_types_scale[size];
      case GL_UNSIGNED_INT: return uint_types_scale[size];
      case GL_UNSIGNED_SHORT: return ushort_types_scale[size];
      case GL_UNSIGNED_BYTE: return ubyte_types_scale[size];
      case GL_FIXED:
         if (brw->gen >= 8 || brw->is_haswell)
            return fixed_point_types[size];

         /* This produces GL_FIXED inputs as values between INT32_MIN and
          * INT32_MAX, which will be scaled down by 1/65536 by the VS.
          */
         return int_types_scale[size];
      default: unreachable("not reached");
      }
   }
}
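
/**
 * Copy the [min, max] element range of a client-memory vertex array into
 * a fresh upload buffer, repacked at dst_stride, and fill in buffer->bo,
 * offset, stride, and size for the caller.
 */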
static void
copy_array_to_vbo_array(struct brw_context *brw,
                        struct brw_vertex_element *element,
                        int min, int max,
                        struct brw_vertex_buffer *buffer,
                        GLuint dst_stride)
{
   const int src_stride = element->glarray->StrideB;

   /* If the source stride is zero, we just want to upload the current
    * attribute once and set the buffer's stride to 0.  There's no need
    * to replicate it out.
    */
   if (src_stride == 0) {
      intel_upload_data(brw, element->glarray->Ptr,
                        element->glarray->_ElementSize,
                        element->glarray->_ElementSize,
                        &buffer->bo, &buffer->offset);

      buffer->stride = 0;
      buffer->size = element->glarray->_ElementSize;
      return;
   }

   const unsigned char *src = element->glarray->Ptr + min * src_stride;
   int count = max - min + 1;
   GLuint size = count * dst_stride;
   uint8_t *dst = intel_upload_space(brw, size, dst_stride,
                                     &buffer->bo, &buffer->offset);

   /* The GL 4.5 spec says:
    *      "If any enabled array’s buffer binding is zero when DrawArrays or
    *      one of the other drawing commands defined in section 10.4 is
    *      called, the result is undefined."
    *
    * In this case, just leave dst filled with undefined values.
    */
   if (src != NULL) {
      if (dst_stride == src_stride) {
         memcpy(dst, src, size);
      } else {
         while (count--) {
            memcpy(dst, src, dst_stride);
            src += src_stride;
            dst += dst_stride;
         }
      }
   }
   buffer->stride = dst_stride;
   buffer->size = size;
}
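
/* Gather the vertex arrays the current VS reads, point each one at a
 * hardware vertex buffer slot (sharing slots between arrays that live in
 * the same buffer object), and upload any client-memory arrays, packing
 * them interleaved into a single buffer when their layout allows it.
 */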
static void
brw_prepare_vertices(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   /* BRW_NEW_VS_PROG_DATA */
   const struct brw_vs_prog_data *vs_prog_data =
      brw_vs_prog_data(brw->vs.base.prog_data);
   GLbitfield64 vs_inputs = vs_prog_data->inputs_read;
   const unsigned char *ptr = NULL;
   GLuint interleaved = 0;
   unsigned int min_index = brw->vb.min_index + brw->basevertex;
   unsigned int max_index = brw->vb.max_index + brw->basevertex;
   unsigned i;
   int delta, j;

   struct brw_vertex_element *upload[VERT_ATTRIB_MAX];
   GLuint nr_uploads = 0;

   /* _NEW_POLYGON
    *
    * On gen6+, edge flags don't end up in the VUE (either in or out of the
    * VS).  Instead, they're uploaded as the last vertex element, and the
    * data is passed sideband through the fixed function units.  So, we need
    * to prepare the vertex buffer for it, but it's not present in
    * inputs_read.
    */
   if (brw->gen >= 6 && (ctx->Polygon.FrontMode != GL_FILL ||
                         ctx->Polygon.BackMode != GL_FILL)) {
      vs_inputs |= VERT_BIT_EDGEFLAG;
   }

   if (0)
      fprintf(stderr, "%s %d..%d\n", __func__, min_index, max_index);

   /* Accumulate the list of enabled arrays. */
   brw->vb.nr_enabled = 0;
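   /* On gen8+ a 64-bit (dual-slot) input occupies two bits of inputs_read,
    * so an input's element index is its bit position minus one slot for
    * every dual-slot input enabled below it; that is what the
    * DIV_ROUND_UP(..., 2) term computes.
    */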
   while (vs_inputs) {
      GLuint first = ffsll(vs_inputs) - 1;
      GLuint index =
         first - DIV_ROUND_UP(_mesa_bitcount_64(vs_prog_data->double_inputs_read &
                                                BITFIELD64_MASK(first)), 2);
      struct brw_vertex_element *input = &brw->vb.inputs[index];
      input->is_dual_slot = brw->gen >= 8 &&
         (vs_prog_data->double_inputs_read & BITFIELD64_BIT(first)) != 0;
      vs_inputs &= ~BITFIELD64_BIT(first);
      if (input->is_dual_slot)
         vs_inputs &= ~BITFIELD64_BIT(first + 1);
      brw->vb.enabled[brw->vb.nr_enabled++] = input;
   }

   if (brw->vb.nr_enabled == 0)
      return;

   if (brw->vb.nr_buffers)
      return;

   /* The range of data in a given buffer represented as [min, max) */
   struct intel_buffer_object *enabled_buffer[VERT_ATTRIB_MAX];
   uint32_t buffer_range_start[VERT_ATTRIB_MAX];
   uint32_t buffer_range_end[VERT_ATTRIB_MAX];

   for (i = j = 0; i < brw->vb.nr_enabled; i++) {
      struct brw_vertex_element *input = brw->vb.enabled[i];
      const struct gl_vertex_array *glarray = input->glarray;

      if (_mesa_is_bufferobj(glarray->BufferObj)) {
         struct intel_buffer_object *intel_buffer =
            intel_buffer_object(glarray->BufferObj);

         const uint32_t offset = (uintptr_t)glarray->Ptr;

         /* Start with the worst case */
         uint32_t start = 0;
         uint32_t range = intel_buffer->Base.Size;
         if (glarray->InstanceDivisor) {
            if (brw->num_instances) {
               start = offset + glarray->StrideB * brw->baseinstance;
               range = (glarray->StrideB * ((brw->num_instances - 1) /
                                            glarray->InstanceDivisor) +
                        glarray->_ElementSize);
            }
         } else {
            if (brw->vb.index_bounds_valid) {
               start = offset + min_index * glarray->StrideB;
               range = (glarray->StrideB * (max_index - min_index) +
                        glarray->_ElementSize);
            }
         }

         /* If we have a VB set to be uploaded for this buffer object
          * already, reuse that VB state so that we emit fewer
          * relocations.
          */
         unsigned k;
         for (k = 0; k < i; k++) {
            const struct gl_vertex_array *other = brw->vb.enabled[k]->glarray;
            if (glarray->BufferObj == other->BufferObj &&
                glarray->StrideB == other->StrideB &&
                glarray->InstanceDivisor == other->InstanceDivisor &&
                (uintptr_t)(glarray->Ptr - other->Ptr) < glarray->StrideB)
            {
               input->buffer = brw->vb.enabled[k]->buffer;
               input->offset = glarray->Ptr - other->Ptr;

               buffer_range_start[input->buffer] =
                  MIN2(buffer_range_start[input->buffer], start);
               buffer_range_end[input->buffer] =
                  MAX2(buffer_range_end[input->buffer], start + range);
               break;
            }
         }
         if (k == i) {
            struct brw_vertex_buffer *buffer = &brw->vb.buffers[j];

            /* Named buffer object: Just reference its contents directly. */
            buffer->offset = offset;
            buffer->stride = glarray->StrideB;
            buffer->step_rate = glarray->InstanceDivisor;
            buffer->size = glarray->BufferObj->Size - offset;

            enabled_buffer[j] = intel_buffer;
            buffer_range_start[j] = start;
            buffer_range_end[j] = start + range;

            input->buffer = j++;
            input->offset = 0;
         }
      } else {
         /* Queue the buffer object up to be uploaded in the next pass,
          * when we've decided if we're doing interleaved or not.
          */
         if (nr_uploads == 0) {
            interleaved = glarray->StrideB;
            ptr = glarray->Ptr;
         }
         else if (interleaved != glarray->StrideB ||
                  glarray->Ptr < ptr ||
                  (uintptr_t)(glarray->Ptr - ptr) + glarray->_ElementSize > interleaved)
         {
            /* If our stride is different from the first attribute's stride,
             * or if the first attribute's stride didn't cover our element,
             * disable the interleaved upload optimization.  The second case
             * can most commonly occur in cases where there is a single vertex
             * and, for example, the data is stored on the application's
             * stack.
             *
             * NOTE: This will also disable the optimization in cases where
             * the data is in a different order than the array indices.
             * Something like:
             *
             *       float data[...];
             *       glVertexAttribPointer(0, 4, GL_FLOAT, 32, &data[4]);
             *       glVertexAttribPointer(1, 4, GL_FLOAT, 32, &data[0]);
             */
            interleaved = 0;
         }

         upload[nr_uploads++] = input;
      }
   }

   /* Now that we've set up all of the buffers, we walk through and reference
    * each of them.  We do this late so that we get the right size in each
    * buffer and don't reference too little data.
    */
   for (i = 0; i < j; i++) {
      struct brw_vertex_buffer *buffer = &brw->vb.buffers[i];
      if (buffer->bo)
         continue;

      const uint32_t start = buffer_range_start[i];
      const uint32_t range = buffer_range_end[i] - buffer_range_start[i];

      buffer->bo = intel_bufferobj_buffer(brw, enabled_buffer[i], start, range);
      drm_intel_bo_reference(buffer->bo);
   }

   /* If we need to upload all the arrays, then we can trim those arrays to
    * only the used elements [min_index, max_index] so long as we adjust all
    * the values used in the 3DPRIMITIVE, i.e. by setting the vertex bias.
    */
   brw->vb.start_vertex_bias = 0;
   delta = min_index;
   if (nr_uploads == brw->vb.nr_enabled) {
      brw->vb.start_vertex_bias = -delta;
      delta = 0;
   }

   /* Handle any arrays to be uploaded. */
   if (nr_uploads > 1) {
      if (interleaved) {
         struct brw_vertex_buffer *buffer = &brw->vb.buffers[j];
         /* All uploads are interleaved, so upload the arrays together as
          * interleaved.  First, upload the contents and set up upload[0].
          */
         copy_array_to_vbo_array(brw, upload[0], min_index, max_index,
                                 buffer, interleaved);
         buffer->offset -= delta * interleaved;
         buffer->size += delta * interleaved;

         for (i = 0; i < nr_uploads; i++) {
            /* Then, just point upload[i] at upload[0]'s buffer. */
            upload[i]->offset =
               ((const unsigned char *)upload[i]->glarray->Ptr - ptr);
            upload[i]->buffer = j;
         }
         j++;

         nr_uploads = 0;
      }
   }
   /* Upload non-interleaved arrays */
   for (i = 0; i < nr_uploads; i++) {
      struct brw_vertex_buffer *buffer = &brw->vb.buffers[j];
      if (upload[i]->glarray->InstanceDivisor == 0) {
         copy_array_to_vbo_array(brw, upload[i], min_index, max_index,
                                 buffer, upload[i]->glarray->_ElementSize);
      } else {
         /* This is an instanced attribute, since its InstanceDivisor
          * is not zero. Therefore, its data will be stepped after the
          * instanced draw has been run InstanceDivisor times.
          */
         uint32_t instanced_attr_max_index =
            (brw->num_instances - 1) / upload[i]->glarray->InstanceDivisor;
         copy_array_to_vbo_array(brw, upload[i], 0, instanced_attr_max_index,
                                 buffer, upload[i]->glarray->_ElementSize);
      }
      buffer->offset -= delta * buffer->stride;
      buffer->size += delta * buffer->stride;
      buffer->step_rate = upload[i]->glarray->InstanceDivisor;
      upload[i]->buffer = j++;
      upload[i]->offset = 0;
   }

   brw->vb.nr_buffers = j;
}
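
/* Upload the gl_BaseVertex/gl_BaseInstance pair and gl_DrawID to small
 * buffers so that they can be sourced as ordinary vertex elements; the
 * matching VERTEX_ELEMENT state is set up in brw_emit_vertices().
 */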
void
brw_prepare_shader_draw_parameters(struct brw_context *brw)
{
   const struct brw_vs_prog_data *vs_prog_data =
      brw_vs_prog_data(brw->vs.base.prog_data);

   /* For non-indirect draws, upload gl_BaseVertex. */
   if ((vs_prog_data->uses_basevertex || vs_prog_data->uses_baseinstance) &&
       brw->draw.draw_params_bo == NULL) {
      intel_upload_data(brw, &brw->draw.params, sizeof(brw->draw.params), 4,
                        &brw->draw.draw_params_bo,
                        &brw->draw.draw_params_offset);
   }

   if (vs_prog_data->uses_drawid) {
      intel_upload_data(brw, &brw->draw.gl_drawid, sizeof(brw->draw.gl_drawid), 4,
                        &brw->draw.draw_id_bo,
                        &brw->draw.draw_id_offset);
   }
}

/**
 * Emit a VERTEX_BUFFER_STATE entry (part of 3DSTATE_VERTEX_BUFFERS).
 */
uint32_t *
brw_emit_vertex_buffer_state(struct brw_context *brw,
                             unsigned buffer_nr,
                             drm_intel_bo *bo,
                             unsigned start_offset,
                             unsigned end_offset,
                             unsigned stride,
                             unsigned step_rate,
                             uint32_t *__map)
{
   struct gl_context *ctx = &brw->ctx;
   uint32_t dw0;

   if (brw->gen >= 8) {
      dw0 = buffer_nr << GEN6_VB0_INDEX_SHIFT;
   } else if (brw->gen >= 6) {
      dw0 = (buffer_nr << GEN6_VB0_INDEX_SHIFT) |
            (step_rate ? GEN6_VB0_ACCESS_INSTANCEDATA
                       : GEN6_VB0_ACCESS_VERTEXDATA);
   } else {
      dw0 = (buffer_nr << BRW_VB0_INDEX_SHIFT) |
            (step_rate ? BRW_VB0_ACCESS_INSTANCEDATA
                       : BRW_VB0_ACCESS_VERTEXDATA);
   }

   if (brw->gen >= 7)
      dw0 |= GEN7_VB0_ADDRESS_MODIFYENABLE;

   switch (brw->gen) {
   case 7:
      dw0 |= GEN7_MOCS_L3 << 16;
      break;
   case 8:
      dw0 |= BDW_MOCS_WB << 16;
      break;
   case 9:
      dw0 |= SKL_MOCS_WB << 16;
      break;
   }

   WARN_ONCE(stride >= (brw->gen >= 5 ? 2048 : 2047),
             "VBO stride %d too large, bad rendering may occur\n",
             stride);
   OUT_BATCH(dw0 | (stride << BRW_VB0_PITCH_SHIFT));
   if (brw->gen >= 8) {
      OUT_RELOC64(bo, I915_GEM_DOMAIN_VERTEX, 0, start_offset);
      /* From the BSpec: 3D Pipeline Stages - 3D Pipeline Geometry -
       *                 Vertex Fetch (VF) Stage - State
       *
       * Instead of "VBState.StartingBufferAddress + VBState.MaxIndex x
       * VBState.BufferPitch", the address of the byte immediately beyond the
       * last valid byte of the buffer is determined by
       * "VBState.StartingBufferAddress + VBState.BufferSize".
       */
      OUT_BATCH(end_offset - start_offset);
   } else if (brw->gen >= 5) {
      OUT_RELOC(bo, I915_GEM_DOMAIN_VERTEX, 0, start_offset);
      /* From the BSpec: 3D Pipeline Stages - 3D Pipeline Geometry -
       *                 Vertex Fetch (VF) Stage - State
       *
       * Instead of "VBState.StartingBufferAddress + VBState.MaxIndex x
       * VBState.BufferPitch", the address of the byte immediately beyond the
       * last valid byte of the buffer is determined by
       * "VBState.EndAddress + 1".
       */
      OUT_RELOC(bo, I915_GEM_DOMAIN_VERTEX, 0, end_offset - 1);
      OUT_BATCH(step_rate);
   } else {
      OUT_RELOC(bo, I915_GEM_DOMAIN_VERTEX, 0, start_offset);
      OUT_BATCH(0);
      OUT_BATCH(step_rate);
   }

   return __map;
}
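
/* Emit the 3DSTATE_VERTEX_BUFFERS and 3DSTATE_VERTEX_ELEMENTS packets for
 * the current draw, including the extra elements that feed the VS system
 * values (gl_VertexID/gl_InstanceID/gl_BaseVertex/gl_BaseInstance and
 * gl_DrawID) and the gen6+ sideband edge flag.
 */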
static void
brw_emit_vertices(struct brw_context *brw)
{
   GLuint i;

   brw_prepare_vertices(brw);
   brw_prepare_shader_draw_parameters(brw);

   brw_emit_query_begin(brw);

   const struct brw_vs_prog_data *vs_prog_data =
      brw_vs_prog_data(brw->vs.base.prog_data);

   unsigned nr_elements = brw->vb.nr_enabled;
   if (vs_prog_data->uses_vertexid || vs_prog_data->uses_instanceid ||
       vs_prog_data->uses_basevertex || vs_prog_data->uses_baseinstance)
      ++nr_elements;
   if (vs_prog_data->uses_drawid)
      nr_elements++;

   /* If the VS doesn't read any inputs (calculating vertex position from
    * a state variable for some reason, for example), emit a single pad
    * VERTEX_ELEMENT struct and bail.
    *
    * The stale VB state stays in place, but it doesn't do anything unless
    * a VE loads from it.
    */
   if (nr_elements == 0) {
      BEGIN_BATCH(3);
      OUT_BATCH((_3DSTATE_VERTEX_ELEMENTS << 16) | 1);
      if (brw->gen >= 6) {
         OUT_BATCH((0 << GEN6_VE0_INDEX_SHIFT) |
                   GEN6_VE0_VALID |
                   (BRW_SURFACEFORMAT_R32G32B32A32_FLOAT << BRW_VE0_FORMAT_SHIFT) |
                   (0 << BRW_VE0_SRC_OFFSET_SHIFT));
      } else {
         OUT_BATCH((0 << BRW_VE0_INDEX_SHIFT) |
                   BRW_VE0_VALID |
                   (BRW_SURFACEFORMAT_R32G32B32A32_FLOAT << BRW_VE0_FORMAT_SHIFT) |
                   (0 << BRW_VE0_SRC_OFFSET_SHIFT));
      }
      OUT_BATCH((BRW_VE1_COMPONENT_STORE_0 << BRW_VE1_COMPONENT_0_SHIFT) |
                (BRW_VE1_COMPONENT_STORE_0 << BRW_VE1_COMPONENT_1_SHIFT) |
                (BRW_VE1_COMPONENT_STORE_0 << BRW_VE1_COMPONENT_2_SHIFT) |
                (BRW_VE1_COMPONENT_STORE_1_FLT << BRW_VE1_COMPONENT_3_SHIFT));
      ADVANCE_BATCH();
      return;
   }

   /* Now emit VB and VEP state packets.
    */

   const bool uses_draw_params =
      vs_prog_data->uses_basevertex ||
      vs_prog_data->uses_baseinstance;
   const unsigned nr_buffers = brw->vb.nr_buffers +
      uses_draw_params + vs_prog_data->uses_drawid;

   if (nr_buffers) {
      if (brw->gen >= 6) {
         assert(nr_buffers <= 33);
      } else {
         assert(nr_buffers <= 17);
      }

      BEGIN_BATCH(1 + 4 * nr_buffers);
      OUT_BATCH((_3DSTATE_VERTEX_BUFFERS << 16) | (4 * nr_buffers - 1));
      for (i = 0; i < brw->vb.nr_buffers; i++) {
         struct brw_vertex_buffer *buffer = &brw->vb.buffers[i];
         /* Prior to Haswell and Bay Trail we have to use 4-component formats
          * to fake 3-component ones.  In particular, we do this for
          * half-float and 8 and 16-bit integer formats.  This means that the
          * vertex element may poke over the end of the buffer by 2 bytes.
          */
         const unsigned padding =
            (brw->gen <= 7 && !brw->is_baytrail && !brw->is_haswell) * 2;
         EMIT_VERTEX_BUFFER_STATE(brw, i, buffer->bo, buffer->offset,
                                  buffer->offset + buffer->size + padding,
                                  buffer->stride, buffer->step_rate);
      }

      if (uses_draw_params) {
         EMIT_VERTEX_BUFFER_STATE(brw, brw->vb.nr_buffers,
                                  brw->draw.draw_params_bo,
                                  brw->draw.draw_params_offset,
                                  brw->draw.draw_params_bo->size,
                                  0,  /* stride */
                                  0); /* step rate */
      }

      if (vs_prog_data->uses_drawid) {
         EMIT_VERTEX_BUFFER_STATE(brw, brw->vb.nr_buffers + 1,
                                  brw->draw.draw_id_bo,
                                  brw->draw.draw_id_offset,
                                  brw->draw.draw_id_bo->size,
                                  0,  /* stride */
                                  0); /* step rate */
      }

      ADVANCE_BATCH();
   }

   /* The hardware allows one more VERTEX_ELEMENTS than VERTEX_BUFFERS,
    * presumably for VertexID/InstanceID.
    */
   if (brw->gen >= 6) {
      assert(nr_elements <= 34);
   } else {
      assert(nr_elements <= 18);
   }

   struct brw_vertex_element *gen6_edgeflag_input = NULL;

   BEGIN_BATCH(1 + nr_elements * 2);
   OUT_BATCH((_3DSTATE_VERTEX_ELEMENTS << 16) | (2 * nr_elements - 1));
   for (i = 0; i < brw->vb.nr_enabled; i++) {
      struct brw_vertex_element *input = brw->vb.enabled[i];
      uint32_t format = brw_get_vertex_surface_type(brw, input->glarray);
      uint32_t comp0 = BRW_VE1_COMPONENT_STORE_SRC;
      uint32_t comp1 = BRW_VE1_COMPONENT_STORE_SRC;
      uint32_t comp2 = BRW_VE1_COMPONENT_STORE_SRC;
      uint32_t comp3 = BRW_VE1_COMPONENT_STORE_SRC;

      if (input == &brw->vb.inputs[VERT_ATTRIB_EDGEFLAG]) {
         /* Gen6+ passes edgeflag as sideband along with the vertex, instead
          * of in the VUE.  We have to upload it sideband as the last vertex
          * element according to the B-Spec.
          */
         if (brw->gen >= 6) {
            gen6_edgeflag_input = input;
            continue;
         }
      }

      /* The cases below fall through on purpose: a size-N array reads its
       * first N components from memory, stores 0 in the remaining ones, and
       * stores 1 in w (as an int or a float, to match the format).
       */
      switch (input->glarray->Size) {
      case 0: comp0 = BRW_VE1_COMPONENT_STORE_0;
      case 1: comp1 = BRW_VE1_COMPONENT_STORE_0;
      case 2: comp2 = BRW_VE1_COMPONENT_STORE_0;
      case 3: comp3 = input->glarray->Integer ? BRW_VE1_COMPONENT_STORE_1_INT
                                              : BRW_VE1_COMPONENT_STORE_1_FLT;
         break;
      }

      if (brw->gen >= 6) {
         OUT_BATCH((input->buffer << GEN6_VE0_INDEX_SHIFT) |
                   GEN6_VE0_VALID |
                   (format << BRW_VE0_FORMAT_SHIFT) |
                   (input->offset << BRW_VE0_SRC_OFFSET_SHIFT));
      } else {
         OUT_BATCH((input->buffer << BRW_VE0_INDEX_SHIFT) |
                   BRW_VE0_VALID |
                   (format << BRW_VE0_FORMAT_SHIFT) |
                   (input->offset << BRW_VE0_SRC_OFFSET_SHIFT));
      }

      if (brw->gen >= 5)
         OUT_BATCH((comp0 << BRW_VE1_COMPONENT_0_SHIFT) |
                   (comp1 << BRW_VE1_COMPONENT_1_SHIFT) |
                   (comp2 << BRW_VE1_COMPONENT_2_SHIFT) |
                   (comp3 << BRW_VE1_COMPONENT_3_SHIFT));
      else
         OUT_BATCH((comp0 << BRW_VE1_COMPONENT_0_SHIFT) |
                   (comp1 << BRW_VE1_COMPONENT_1_SHIFT) |
                   (comp2 << BRW_VE1_COMPONENT_2_SHIFT) |
                   (comp3 << BRW_VE1_COMPONENT_3_SHIFT) |
                   ((i * 4) << BRW_VE1_DST_OFFSET_SHIFT));
   }

   if (vs_prog_data->uses_vertexid || vs_prog_data->uses_instanceid ||
       vs_prog_data->uses_basevertex || vs_prog_data->uses_baseinstance) {
      uint32_t dw0 = 0, dw1 = 0;
      uint32_t comp0 = BRW_VE1_COMPONENT_STORE_0;
      uint32_t comp1 = BRW_VE1_COMPONENT_STORE_0;
      uint32_t comp2 = BRW_VE1_COMPONENT_STORE_0;
      uint32_t comp3 = BRW_VE1_COMPONENT_STORE_0;

      if (vs_prog_data->uses_basevertex)
         comp0 = BRW_VE1_COMPONENT_STORE_SRC;

      if (vs_prog_data->uses_baseinstance)
         comp1 = BRW_VE1_COMPONENT_STORE_SRC;

      if (vs_prog_data->uses_vertexid)
         comp2 = BRW_VE1_COMPONENT_STORE_VID;

      if (vs_prog_data->uses_instanceid)
         comp3 = BRW_VE1_COMPONENT_STORE_IID;

      dw1 = (comp0 << BRW_VE1_COMPONENT_0_SHIFT) |
            (comp1 << BRW_VE1_COMPONENT_1_SHIFT) |
            (comp2 << BRW_VE1_COMPONENT_2_SHIFT) |
            (comp3 << BRW_VE1_COMPONENT_3_SHIFT);

      if (brw->gen >= 6) {
         dw0 |= GEN6_VE0_VALID |
                brw->vb.nr_buffers << GEN6_VE0_INDEX_SHIFT |
                BRW_SURFACEFORMAT_R32G32_UINT << BRW_VE0_FORMAT_SHIFT;
      } else {
         dw0 |= BRW_VE0_VALID |
                brw->vb.nr_buffers << BRW_VE0_INDEX_SHIFT |
                BRW_SURFACEFORMAT_R32G32_UINT << BRW_VE0_FORMAT_SHIFT;
         dw1 |= (i * 4) << BRW_VE1_DST_OFFSET_SHIFT;
      }

      /* Note that for gl_VertexID, gl_InstanceID, and gl_PrimitiveID values,
       * the format is ignored and the value is always int.
       */

      OUT_BATCH(dw0);
      OUT_BATCH(dw1);
   }

   if (vs_prog_data->uses_drawid) {
      uint32_t dw0 = 0, dw1 = 0;

      dw1 = (BRW_VE1_COMPONENT_STORE_SRC << BRW_VE1_COMPONENT_0_SHIFT) |
            (BRW_VE1_COMPONENT_STORE_0 << BRW_VE1_COMPONENT_1_SHIFT) |
            (BRW_VE1_COMPONENT_STORE_0 << BRW_VE1_COMPONENT_2_SHIFT) |
            (BRW_VE1_COMPONENT_STORE_0 << BRW_VE1_COMPONENT_3_SHIFT);

      if (brw->gen >= 6) {
         dw0 |= GEN6_VE0_VALID |
                ((brw->vb.nr_buffers + 1) << GEN6_VE0_INDEX_SHIFT) |
                (BRW_SURFACEFORMAT_R32_UINT << BRW_VE0_FORMAT_SHIFT);
      } else {
         dw0 |= BRW_VE0_VALID |
                ((brw->vb.nr_buffers + 1) << BRW_VE0_INDEX_SHIFT) |
                (BRW_SURFACEFORMAT_R32_UINT << BRW_VE0_FORMAT_SHIFT);

         dw1 |= (i * 4) << BRW_VE1_DST_OFFSET_SHIFT;
      }

      OUT_BATCH(dw0);
      OUT_BATCH(dw1);
   }

   if (brw->gen >= 6 && gen6_edgeflag_input) {
      uint32_t format =
         brw_get_vertex_surface_type(brw, gen6_edgeflag_input->glarray);

      OUT_BATCH((gen6_edgeflag_input->buffer << GEN6_VE0_INDEX_SHIFT) |
                GEN6_VE0_VALID |
                GEN6_VE0_EDGE_FLAG_ENABLE |
                (format << BRW_VE0_FORMAT_SHIFT) |
                (gen6_edgeflag_input->offset << BRW_VE0_SRC_OFFSET_SHIFT));
      OUT_BATCH((BRW_VE1_COMPONENT_STORE_SRC << BRW_VE1_COMPONENT_0_SHIFT) |
                (BRW_VE1_COMPONENT_STORE_0 << BRW_VE1_COMPONENT_1_SHIFT) |
                (BRW_VE1_COMPONENT_STORE_0 << BRW_VE1_COMPONENT_2_SHIFT) |
                (BRW_VE1_COMPONENT_STORE_0 << BRW_VE1_COMPONENT_3_SHIFT));
   }

   ADVANCE_BATCH();
}

const struct brw_tracked_state brw_vertices = {
   .dirty = {
      .mesa = _NEW_POLYGON,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_BLORP |
             BRW_NEW_VERTICES |
             BRW_NEW_VS_PROG_DATA,
   },
   .emit = brw_emit_vertices,
};
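
/* Make sure the index data lives in a GPU buffer object: reference the
 * application's VBO directly when the offset is suitably aligned,
 * otherwise copy the indices into an upload buffer.
 */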
static void
brw_upload_indices(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   const struct _mesa_index_buffer *index_buffer = brw->ib.ib;
   GLuint ib_size;
   drm_intel_bo *old_bo = brw->ib.bo;
   struct gl_buffer_object *bufferobj;
   GLuint offset;
   GLuint ib_type_size;

   if (index_buffer == NULL)
      return;

   ib_type_size = _mesa_sizeof_type(index_buffer->type);
   ib_size = index_buffer->count ? ib_type_size * index_buffer->count :
                                   index_buffer->obj->Size;
   bufferobj = index_buffer->obj;

   /* Turn into a proper VBO:
    */
   if (!_mesa_is_bufferobj(bufferobj)) {
      /* Get new bufferobj, offset:
       */
      intel_upload_data(brw, index_buffer->ptr, ib_size, ib_type_size,
                        &brw->ib.bo, &offset);
      brw->ib.size = brw->ib.bo->size;
   } else {
      offset = (GLuint) (unsigned long) index_buffer->ptr;

      /* If the index buffer isn't aligned to its element size, we have to
       * rebase it into a temporary.
       */
      if ((ib_type_size - 1) & offset) {
         perf_debug("copying index buffer to a temporary to work around "
                    "misaligned offset %d\n", offset);

         GLubyte *map = ctx->Driver.MapBufferRange(ctx,
                                                   offset,
                                                   ib_size,
                                                   GL_MAP_READ_BIT,
                                                   bufferobj,
                                                   MAP_INTERNAL);

         intel_upload_data(brw, map, ib_size, ib_type_size,
                           &brw->ib.bo, &offset);
         brw->ib.size = brw->ib.bo->size;

         ctx->Driver.UnmapBuffer(ctx, bufferobj, MAP_INTERNAL);
      } else {
         drm_intel_bo *bo =
            intel_bufferobj_buffer(brw, intel_buffer_object(bufferobj),
                                   offset, ib_size);
         if (bo != brw->ib.bo) {
            drm_intel_bo_unreference(brw->ib.bo);
            brw->ib.bo = bo;
            brw->ib.size = bufferobj->Size;
            drm_intel_bo_reference(bo);
         }
      }
   }

   /* Use 3DPRIMITIVE's start_vertex_offset to avoid re-uploading
    * the index buffer state when we're just moving the start index
    * of our drawing.
    */
   brw->ib.start_vertex_offset = offset / ib_type_size;

   if (brw->ib.bo != old_bo)
      brw->ctx.NewDriverState |= BRW_NEW_INDEX_BUFFER;

   if (index_buffer->type != brw->ib.type) {
      brw->ib.type = index_buffer->type;
      brw->ctx.NewDriverState |= BRW_NEW_INDEX_BUFFER;
   }
}

const struct brw_tracked_state brw_indices = {
   .dirty = {
      .mesa = 0,
      .brw = BRW_NEW_BLORP |
             BRW_NEW_INDICES,
   },
   .emit = brw_upload_indices,
};
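
/* Emit 3DSTATE_INDEX_BUFFER. Note that on pre-Haswell hardware the cut
 * (primitive restart) index is enabled through a bit in this packet;
 * Haswell moved it to the 3DSTATE_VF packet, which is emitted elsewhere,
 * hence the !is_haswell check below.
 */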
static void
brw_emit_index_buffer(struct brw_context *brw)
{
   const struct _mesa_index_buffer *index_buffer = brw->ib.ib;
   GLuint cut_index_setting;

   if (index_buffer == NULL)
      return;

   if (brw->prim_restart.enable_cut_index && !brw->is_haswell) {
      cut_index_setting = BRW_CUT_INDEX_ENABLE;
   } else {
      cut_index_setting = 0;
   }

   BEGIN_BATCH(3);
   OUT_BATCH(CMD_INDEX_BUFFER << 16 |
             cut_index_setting |
             brw_get_index_type(index_buffer->type) |
             1);
   OUT_RELOC(brw->ib.bo,
             I915_GEM_DOMAIN_VERTEX, 0,
             0);
   OUT_RELOC(brw->ib.bo,
             I915_GEM_DOMAIN_VERTEX, 0,
             brw->ib.size - 1);
   ADVANCE_BATCH();
}

const struct brw_tracked_state brw_index_buffer = {
   .dirty = {
      .mesa = 0,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_BLORP |
             BRW_NEW_INDEX_BUFFER,
   },
   .emit = brw_emit_index_buffer,
};