/**************************************************************************
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include "main/glheader.h"
#include "main/bufferobj.h"
#include "main/context.h"
#include "main/enums.h"
#include "main/macros.h"

#include "brw_defines.h"
#include "brw_context.h"
#include "brw_state.h"

#include "intel_batchbuffer.h"
#include "intel_buffer_objects.h"
/* Hardware surface format lookup tables, indexed by the GL array's
 * component count (1..4); entry 0 is an unused placeholder.  Where no
 * three-component hardware format exists (the 16- and 8-bit integer
 * formats and half floats), the four-component format is reused for
 * size 3.
 */
static GLuint double_types[5] = {
   0,
   BRW_SURFACEFORMAT_R64_FLOAT,
   BRW_SURFACEFORMAT_R64G64_FLOAT,
   BRW_SURFACEFORMAT_R64G64B64_FLOAT,
   BRW_SURFACEFORMAT_R64G64B64A64_FLOAT
};

static GLuint float_types[5] = {
   0,
   BRW_SURFACEFORMAT_R32_FLOAT,
   BRW_SURFACEFORMAT_R32G32_FLOAT,
   BRW_SURFACEFORMAT_R32G32B32_FLOAT,
   BRW_SURFACEFORMAT_R32G32B32A32_FLOAT
};

static GLuint half_float_types[5] = {
   0,
   BRW_SURFACEFORMAT_R16_FLOAT,
   BRW_SURFACEFORMAT_R16G16_FLOAT,
   BRW_SURFACEFORMAT_R16G16B16A16_FLOAT,
   BRW_SURFACEFORMAT_R16G16B16A16_FLOAT
};

static GLuint uint_types_direct[5] = {
   0,
   BRW_SURFACEFORMAT_R32_UINT,
   BRW_SURFACEFORMAT_R32G32_UINT,
   BRW_SURFACEFORMAT_R32G32B32_UINT,
   BRW_SURFACEFORMAT_R32G32B32A32_UINT
};

static GLuint uint_types_norm[5] = {
   0,
   BRW_SURFACEFORMAT_R32_UNORM,
   BRW_SURFACEFORMAT_R32G32_UNORM,
   BRW_SURFACEFORMAT_R32G32B32_UNORM,
   BRW_SURFACEFORMAT_R32G32B32A32_UNORM
};

static GLuint uint_types_scale[5] = {
   0,
   BRW_SURFACEFORMAT_R32_USCALED,
   BRW_SURFACEFORMAT_R32G32_USCALED,
   BRW_SURFACEFORMAT_R32G32B32_USCALED,
   BRW_SURFACEFORMAT_R32G32B32A32_USCALED
};

static GLuint int_types_direct[5] = {
   0,
   BRW_SURFACEFORMAT_R32_SINT,
   BRW_SURFACEFORMAT_R32G32_SINT,
   BRW_SURFACEFORMAT_R32G32B32_SINT,
   BRW_SURFACEFORMAT_R32G32B32A32_SINT
};

static GLuint int_types_norm[5] = {
   0,
   BRW_SURFACEFORMAT_R32_SNORM,
   BRW_SURFACEFORMAT_R32G32_SNORM,
   BRW_SURFACEFORMAT_R32G32B32_SNORM,
   BRW_SURFACEFORMAT_R32G32B32A32_SNORM
};

static GLuint int_types_scale[5] = {
   0,
   BRW_SURFACEFORMAT_R32_SSCALED,
   BRW_SURFACEFORMAT_R32G32_SSCALED,
   BRW_SURFACEFORMAT_R32G32B32_SSCALED,
   BRW_SURFACEFORMAT_R32G32B32A32_SSCALED
};

static GLuint ushort_types_direct[5] = {
   0,
   BRW_SURFACEFORMAT_R16_UINT,
   BRW_SURFACEFORMAT_R16G16_UINT,
   BRW_SURFACEFORMAT_R16G16B16A16_UINT,
   BRW_SURFACEFORMAT_R16G16B16A16_UINT
};

static GLuint ushort_types_norm[5] = {
   0,
   BRW_SURFACEFORMAT_R16_UNORM,
   BRW_SURFACEFORMAT_R16G16_UNORM,
   BRW_SURFACEFORMAT_R16G16B16_UNORM,
   BRW_SURFACEFORMAT_R16G16B16A16_UNORM
};

static GLuint ushort_types_scale[5] = {
   0,
   BRW_SURFACEFORMAT_R16_USCALED,
   BRW_SURFACEFORMAT_R16G16_USCALED,
   BRW_SURFACEFORMAT_R16G16B16_USCALED,
   BRW_SURFACEFORMAT_R16G16B16A16_USCALED
};

static GLuint short_types_direct[5] = {
   0,
   BRW_SURFACEFORMAT_R16_SINT,
   BRW_SURFACEFORMAT_R16G16_SINT,
   BRW_SURFACEFORMAT_R16G16B16A16_SINT,
   BRW_SURFACEFORMAT_R16G16B16A16_SINT
};

static GLuint short_types_norm[5] = {
   0,
   BRW_SURFACEFORMAT_R16_SNORM,
   BRW_SURFACEFORMAT_R16G16_SNORM,
   BRW_SURFACEFORMAT_R16G16B16_SNORM,
   BRW_SURFACEFORMAT_R16G16B16A16_SNORM
};

static GLuint short_types_scale[5] = {
   0,
   BRW_SURFACEFORMAT_R16_SSCALED,
   BRW_SURFACEFORMAT_R16G16_SSCALED,
   BRW_SURFACEFORMAT_R16G16B16_SSCALED,
   BRW_SURFACEFORMAT_R16G16B16A16_SSCALED
};

static GLuint ubyte_types_direct[5] = {
   0,
   BRW_SURFACEFORMAT_R8_UINT,
   BRW_SURFACEFORMAT_R8G8_UINT,
   BRW_SURFACEFORMAT_R8G8B8A8_UINT,
   BRW_SURFACEFORMAT_R8G8B8A8_UINT
};

static GLuint ubyte_types_norm[5] = {
   0,
   BRW_SURFACEFORMAT_R8_UNORM,
   BRW_SURFACEFORMAT_R8G8_UNORM,
   BRW_SURFACEFORMAT_R8G8B8_UNORM,
   BRW_SURFACEFORMAT_R8G8B8A8_UNORM
};

static GLuint ubyte_types_scale[5] = {
   0,
   BRW_SURFACEFORMAT_R8_USCALED,
   BRW_SURFACEFORMAT_R8G8_USCALED,
   BRW_SURFACEFORMAT_R8G8B8_USCALED,
   BRW_SURFACEFORMAT_R8G8B8A8_USCALED
};

static GLuint byte_types_direct[5] = {
   0,
   BRW_SURFACEFORMAT_R8_SINT,
   BRW_SURFACEFORMAT_R8G8_SINT,
   BRW_SURFACEFORMAT_R8G8B8A8_SINT,
   BRW_SURFACEFORMAT_R8G8B8A8_SINT
};

static GLuint byte_types_norm[5] = {
   0,
   BRW_SURFACEFORMAT_R8_SNORM,
   BRW_SURFACEFORMAT_R8G8_SNORM,
   BRW_SURFACEFORMAT_R8G8B8_SNORM,
   BRW_SURFACEFORMAT_R8G8B8A8_SNORM
};

static GLuint byte_types_scale[5] = {
   0,
   BRW_SURFACEFORMAT_R8_SSCALED,
   BRW_SURFACEFORMAT_R8G8_SSCALED,
   BRW_SURFACEFORMAT_R8G8B8_SSCALED,
   BRW_SURFACEFORMAT_R8G8B8A8_SSCALED
};
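
/* For illustration (not from the original source): an application array
 * declared with size 3 and type GL_FLOAT indexes the tables above by its
 * component count, so float_types[3] == BRW_SURFACEFORMAT_R32G32B32_FLOAT;
 * entry 0 of every table is never used.
 */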
/**
 * Given vertex array type/size/format/normalized info, return
 * the appropriate hardware surface type.
 * Format will be GL_RGBA or possibly GL_BGRA for GLubyte[4] color arrays.
 */
static GLuint get_surface_type( GLenum type, GLuint size,
                                GLenum format, bool normalized, bool integer )
{
   if (unlikely(INTEL_DEBUG & DEBUG_VERTS))
      printf("type %s size %d normalized %d\n",
             _mesa_lookup_enum_by_nr(type), size, normalized);

   if (integer) {
      assert(format == GL_RGBA); /* sanity check */
      switch (type) {
      case GL_INT: return int_types_direct[size];
      case GL_SHORT: return short_types_direct[size];
      case GL_BYTE: return byte_types_direct[size];
      case GL_UNSIGNED_INT: return uint_types_direct[size];
      case GL_UNSIGNED_SHORT: return ushort_types_direct[size];
      case GL_UNSIGNED_BYTE: return ubyte_types_direct[size];
      default: assert(0); return 0;
      }
   } else if (normalized) {
      switch (type) {
      case GL_DOUBLE: return double_types[size];
      case GL_FLOAT: return float_types[size];
      case GL_HALF_FLOAT: return half_float_types[size];
      case GL_INT: return int_types_norm[size];
      case GL_SHORT: return short_types_norm[size];
      case GL_BYTE: return byte_types_norm[size];
      case GL_UNSIGNED_INT: return uint_types_norm[size];
      case GL_UNSIGNED_SHORT: return ushort_types_norm[size];
      case GL_UNSIGNED_BYTE:
         if (format == GL_BGRA) {
            /* See GL_EXT_vertex_array_bgra */
            assert(size == 4);
            return BRW_SURFACEFORMAT_B8G8R8A8_UNORM;
         }
         else {
            return ubyte_types_norm[size];
         }
      default: assert(0); return 0;
      }
   }
   else {
      assert(format == GL_RGBA); /* sanity check */
      switch (type) {
      case GL_DOUBLE: return double_types[size];
      case GL_FLOAT: return float_types[size];
      case GL_HALF_FLOAT: return half_float_types[size];
      case GL_INT: return int_types_scale[size];
      case GL_SHORT: return short_types_scale[size];
      case GL_BYTE: return byte_types_scale[size];
      case GL_UNSIGNED_INT: return uint_types_scale[size];
      case GL_UNSIGNED_SHORT: return ushort_types_scale[size];
      case GL_UNSIGNED_BYTE: return ubyte_types_scale[size];
      /* This produces GL_FIXED inputs as values between INT32_MIN and
       * INT32_MAX, which will be scaled down by 1/65536 by the VS.
       */
      case GL_FIXED: return int_types_scale[size];
      default: assert(0); return 0;
      }
   }
}
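
/* For illustration (not from the original source): a legacy color array,
 * glColorPointer(4, GL_UNSIGNED_BYTE, 0, colors), reaches get_surface_type
 * with type GL_UNSIGNED_BYTE, size 4 and normalized true, yielding
 * BRW_SURFACEFORMAT_R8G8B8A8_UNORM (or B8G8R8A8_UNORM if the array format
 * is GL_BGRA).
 */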
static GLuint get_size( GLenum type )
{
   switch (type) {
   case GL_DOUBLE: return sizeof(GLdouble);
   case GL_FLOAT: return sizeof(GLfloat);
   case GL_HALF_FLOAT: return sizeof(GLhalfARB);
   case GL_INT: return sizeof(GLint);
   case GL_SHORT: return sizeof(GLshort);
   case GL_BYTE: return sizeof(GLbyte);
   case GL_UNSIGNED_INT: return sizeof(GLuint);
   case GL_UNSIGNED_SHORT: return sizeof(GLushort);
   case GL_UNSIGNED_BYTE: return sizeof(GLubyte);
   case GL_FIXED: return sizeof(GLuint); /* 16.16 fixed point in 32 bits */
   default: assert(0); return 0;
   }
}
static GLuint get_index_type(GLenum type)
{
   switch (type) {
   case GL_UNSIGNED_BYTE: return BRW_INDEX_BYTE;
   case GL_UNSIGNED_SHORT: return BRW_INDEX_WORD;
   case GL_UNSIGNED_INT: return BRW_INDEX_DWORD;
   default: assert(0); return 0;
   }
}
static void
copy_array_to_vbo_array(struct brw_context *brw,
                        struct brw_vertex_element *element,
                        int min, int max,
                        struct brw_vertex_buffer *buffer,
                        GLuint dst_stride)
{
   if (min == -1) {
      /* If we don't have computed min/max bounds, then this must be a use of
       * the current attribute, which has a 0 stride. Otherwise, we wouldn't
       * know what data to upload.
       */
      assert(element->glarray->StrideB == 0);
      intel_upload_data(&brw->intel, element->glarray->Ptr,
                        element->element_size,
                        element->element_size,
                        &buffer->bo, &buffer->offset);

      buffer->stride = 0;
      return;
   }

   int src_stride = element->glarray->StrideB;
   const unsigned char *src = element->glarray->Ptr + min * src_stride;
   int count = max - min + 1;
   GLuint size = count * dst_stride;

   if (dst_stride == src_stride) {
      intel_upload_data(&brw->intel, src, size, dst_stride,
                        &buffer->bo, &buffer->offset);
   } else {
      char * const map = intel_upload_map(&brw->intel, size, dst_stride);
      char *dst = map;

      while (count--) {
         memcpy(dst, src, dst_stride);
         src += src_stride;
         dst += dst_stride;
      }
      intel_upload_unmap(&brw->intel, map, size, dst_stride,
                         &buffer->bo, &buffer->offset);
   }
   buffer->stride = dst_stride;
}
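
/* For illustration (hypothetical strides): an attribute stored with
 * src_stride == 32 but element_size == 12 (three floats) takes the
 * map/memcpy path above, copying 12 bytes per vertex, so the upload ends
 * up tightly packed with dst_stride == 12.
 */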
static void brw_prepare_vertices(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->intel.ctx;
   struct intel_context *intel = intel_context(ctx);
   /* CACHE_NEW_VS_PROG */
   GLbitfield64 vs_inputs = brw->vs.prog_data->inputs_read;
   const unsigned char *ptr = NULL;
   GLuint interleaved = 0, total_size = 0;
   unsigned int min_index = brw->vb.min_index;
   unsigned int max_index = brw->vb.max_index;
   int delta, i, j;

   struct brw_vertex_element *upload[VERT_ATTRIB_MAX];
   GLuint nr_uploads = 0;

   /* First build an array of pointers to ve's in vb.inputs_read
    */
   if (0)
      printf("%s %d..%d\n", __FUNCTION__, min_index, max_index);

   /* Accumulate the list of enabled arrays. */
   brw->vb.nr_enabled = 0;
   while (vs_inputs) {
      GLuint i = ffsll(vs_inputs) - 1;
      struct brw_vertex_element *input = &brw->vb.inputs[i];

      vs_inputs &= ~BITFIELD64_BIT(i);
      if (input->glarray->Size && get_size(input->glarray->Type))
         brw->vb.enabled[brw->vb.nr_enabled++] = input;
   }

   if (brw->vb.nr_enabled == 0)
      return;

   if (brw->vb.nr_buffers)
      goto prepare;

   for (i = j = 0; i < brw->vb.nr_enabled; i++) {
      struct brw_vertex_element *input = brw->vb.enabled[i];
      const struct gl_client_array *glarray = input->glarray;
      int type_size = get_size(glarray->Type);

      input->element_size = type_size * glarray->Size;

      if (_mesa_is_bufferobj(glarray->BufferObj)) {
         struct intel_buffer_object *intel_buffer =
            intel_buffer_object(glarray->BufferObj);
         int k;

         /* If another enabled array already references this buffer object
          * with the same stride, share its vertex buffer slot.
          */
         for (k = 0; k < i; k++) {
            const struct gl_client_array *other = brw->vb.enabled[k]->glarray;
            if (glarray->BufferObj == other->BufferObj &&
                glarray->StrideB == other->StrideB &&
                (uintptr_t)(glarray->Ptr - other->Ptr) < glarray->StrideB)
            {
               input->buffer = brw->vb.enabled[k]->buffer;
               input->offset = glarray->Ptr - other->Ptr;
               break;
            }
         }
         if (k == i) {
            struct brw_vertex_buffer *buffer = &brw->vb.buffers[j];

            /* Named buffer object: Just reference its contents directly. */
            buffer->bo = intel_bufferobj_source(intel,
                                                intel_buffer, type_size,
                                                &buffer->offset);
            drm_intel_bo_reference(buffer->bo);
            buffer->offset += (uintptr_t)glarray->Ptr;
            buffer->stride = glarray->StrideB;

            input->buffer = j++;
            input->offset = 0;
         }

         /* This is a common place to reach if the user mistakenly supplies
          * a pointer in place of a VBO offset. If we just let it go through,
          * we may end up dereferencing a pointer beyond the bounds of the
          * GTT. We would hope that the VBO's max_index would save us, but
          * Mesa appears to hand us min/max values not clipped to the
          * array object's _MaxElement, and _MaxElement frequently appears
          * to be wrong anyway.
          *
          * The VBO spec allows application termination in this case, and it's
          * probably a service to the poor programmer to do so rather than
          * trying to just not render.
          */
         assert(input->offset < brw->vb.buffers[input->buffer].bo->size);
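
         /* For illustration (not from the original source), the mistake the
          * assert above catches typically looks like:
          *
          *    glBindBuffer(GL_ARRAY_BUFFER, vbo);
          *    glVertexPointer(3, GL_FLOAT, 0, vertices);
          *
          * where the final argument is a CPU pointer but, with a buffer
          * bound, is interpreted as a (huge) byte offset into the VBO.
          */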
      } else {
         /* Queue the buffer object up to be uploaded in the next pass,
          * when we've decided if we're doing interleaved or not.
          */
         if (nr_uploads == 0) {
            /* Position array not properly enabled:
             */
            if (input->attrib == VERT_ATTRIB_POS && glarray->StrideB == 0) {
               intel->Fallback = true; /* boolean, not bitfield */
               return;
            }

            interleaved = glarray->StrideB;
            ptr = glarray->Ptr;
         }
         else if (interleaved != glarray->StrideB ||
                  (uintptr_t)(glarray->Ptr - ptr) > interleaved)
         {
            interleaved = 0;
         }
         else if ((uintptr_t)(glarray->Ptr - ptr) & (type_size - 1))
         {
            /* enforce natural alignment (for doubles) */
            interleaved = 0;
         }

         upload[nr_uploads++] = input;
         total_size = ALIGN(total_size, type_size);
         total_size += input->element_size;
      }
   }

   /* If we need to upload all the arrays, then we can trim those arrays to
    * only the used elements [min_index, max_index] so long as we adjust all
    * the values used in the 3DPRIMITIVE i.e. by setting the vertex bias.
    */
   brw->vb.start_vertex_bias = 0;
   delta = min_index;
   if (nr_uploads == brw->vb.nr_enabled) {
      brw->vb.start_vertex_bias = -delta;
      delta = 0;
   }
   if (delta && !brw->intel.intelScreen->relaxed_relocations)
      min_index = delta = 0;
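
   /* Worked example (hypothetical numbers): with min_index = 100 and every
    * enabled array being uploaded, only elements [100, max_index] are copied
    * and start_vertex_bias becomes -100, so vertex 100 fetches element 0 of
    * the upload.  If some arrays instead live in VBOs (delta = 100), the
    * uploaded buffers' offsets are rewound by delta * stride below so that
    * unbiased vertex indices still land on the right data.
    */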
   /* Handle any arrays to be uploaded. */
   if (nr_uploads > 1) {
      if (interleaved && interleaved <= 2*total_size) {
         struct brw_vertex_buffer *buffer = &brw->vb.buffers[j];
         /* All uploads are interleaved, so upload the arrays together as
          * interleaved.  First, upload the contents and set up upload[0].
          */
         copy_array_to_vbo_array(brw, upload[0], min_index, max_index,
                                 buffer, interleaved);
         buffer->offset -= delta * interleaved;

         for (i = 0; i < nr_uploads; i++) {
            /* Then, just point upload[i] at upload[0]'s buffer. */
            upload[i]->offset =
               ((const unsigned char *)upload[i]->glarray->Ptr - ptr);
            upload[i]->buffer = j;
         }
         j++;

         nr_uploads = 0;
      }
      else if (total_size < 2048) {
         /* Upload non-interleaved arrays into a single interleaved array */
         struct brw_vertex_buffer *buffer;
         int count = MAX2(max_index - min_index + 1, 1);
         int offset;
         char *map;

         map = intel_upload_map(&brw->intel, total_size * count, total_size);
         for (i = offset = 0; i < nr_uploads; i++) {
            const unsigned char *src = upload[i]->glarray->Ptr;
            int size = upload[i]->element_size;
            int stride = upload[i]->glarray->StrideB;
            char *dst;
            int n;

            offset = ALIGN(offset, get_size(upload[i]->glarray->Type));
            dst = map + offset;
            src += min_index * stride;

            for (n = 0; n < count; n++) {
               memcpy(dst, src, size);
               src += stride;
               dst += total_size;
            }

            upload[i]->offset = offset;
            upload[i]->buffer = j;

            offset += size;
         }
         assert(offset == total_size);
         buffer = &brw->vb.buffers[j++];
         intel_upload_unmap(&brw->intel, map, offset * count, offset,
                            &buffer->bo, &buffer->offset);
         buffer->stride = offset;
         buffer->offset -= delta * offset;

         nr_uploads = 0;
      }
   }
   /* Upload non-interleaved arrays */
   for (i = 0; i < nr_uploads; i++) {
      struct brw_vertex_buffer *buffer = &brw->vb.buffers[j];
      copy_array_to_vbo_array(brw, upload[i], min_index, max_index,
                              buffer, upload[i]->element_size);
      buffer->offset -= delta * buffer->stride;
      upload[i]->buffer = j++;
      upload[i]->offset = 0;
   }

   /* can we simply extend the current vb? */
   if (j == brw->vb.nr_current_buffers) {
      int delta = 0;
      for (i = 0; i < j; i++) {
         int d;

         if (brw->vb.current_buffers[i].handle != brw->vb.buffers[i].bo->handle ||
             brw->vb.current_buffers[i].stride != brw->vb.buffers[i].stride)
            break;

         d = brw->vb.buffers[i].offset - brw->vb.current_buffers[i].offset;
         if (d < 0)
            break;

         /* The offset delta must be the same whole number of strides
          * (vertices) for every buffer.
          */
         if (i == 0)
            delta = d / brw->vb.current_buffers[i].stride;
         if (delta * brw->vb.current_buffers[i].stride != d)
            break;
      }

      if (i == j) {
         brw->vb.start_vertex_bias += delta;
         while (--j >= 0)
            drm_intel_bo_unreference(brw->vb.buffers[j].bo);
         j = 0;
      }
   }

   brw->vb.nr_buffers = j;

prepare:
   brw_prepare_query_begin(brw);
}
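
/* Worked example for the vertex-buffer reuse check in brw_prepare_vertices
 * above (hypothetical numbers): if the previous draw used the same bo with
 * stride 24 at offset 0 and this draw computed offset 240, then d = 240 is
 * exactly 10 strides, so rather than re-emitting vertex buffer state we add
 * 10 vertices to start_vertex_bias and drop the new references, leaving
 * j == 0 (no buffers to re-emit).
 */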
static void brw_emit_vertices(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->intel.ctx;
   struct intel_context *intel = intel_context(ctx);
   GLuint i, nr_elements;

   brw_prepare_vertices(brw);

   brw_emit_query_begin(brw);

   /* If the VS doesn't read any inputs (calculating vertex position from
    * a state variable for some reason, for example), emit a single pad
    * VERTEX_ELEMENT struct and bail.
    *
    * The stale VB state stays in place, but they don't do anything unless
    * a VE loads from them.
    */
   if (brw->vb.nr_enabled == 0) {
      BEGIN_BATCH(3);
      OUT_BATCH((_3DSTATE_VERTEX_ELEMENTS << 16) | 1);
      if (intel->gen >= 6) {
         OUT_BATCH((0 << GEN6_VE0_INDEX_SHIFT) |
                   GEN6_VE0_VALID |
                   (BRW_SURFACEFORMAT_R32G32B32A32_FLOAT << BRW_VE0_FORMAT_SHIFT) |
                   (0 << BRW_VE0_SRC_OFFSET_SHIFT));
      } else {
         OUT_BATCH((0 << BRW_VE0_INDEX_SHIFT) |
                   BRW_VE0_VALID |
                   (BRW_SURFACEFORMAT_R32G32B32A32_FLOAT << BRW_VE0_FORMAT_SHIFT) |
                   (0 << BRW_VE0_SRC_OFFSET_SHIFT));
      }
      OUT_BATCH((BRW_VE1_COMPONENT_STORE_0 << BRW_VE1_COMPONENT_0_SHIFT) |
                (BRW_VE1_COMPONENT_STORE_0 << BRW_VE1_COMPONENT_1_SHIFT) |
                (BRW_VE1_COMPONENT_STORE_0 << BRW_VE1_COMPONENT_2_SHIFT) |
                (BRW_VE1_COMPONENT_STORE_1_FLT << BRW_VE1_COMPONENT_3_SHIFT));
      CACHED_BATCH();
      return;
   }
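
   /* The pad element emitted in the nr_enabled == 0 path above stores 0 in
    * the first three components and 1_FLT in the fourth, so the VS sees a
    * constant (0, 0, 0, 1.0f) input without any vertex buffer being read.
    */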
   /* Now emit VB and VEP state packets.
    */

   if (brw->vb.nr_buffers) {
      if (intel->gen >= 6) {
         assert(brw->vb.nr_buffers <= 33);
      } else {
         assert(brw->vb.nr_buffers <= 17);
      }

      BEGIN_BATCH(1 + 4*brw->vb.nr_buffers);
      OUT_BATCH((_3DSTATE_VERTEX_BUFFERS << 16) | (4*brw->vb.nr_buffers - 1));
      for (i = 0; i < brw->vb.nr_buffers; i++) {
         struct brw_vertex_buffer *buffer = &brw->vb.buffers[i];
         uint32_t dw0;

         if (intel->gen >= 6) {
            dw0 = GEN6_VB0_ACCESS_VERTEXDATA | (i << GEN6_VB0_INDEX_SHIFT);
         } else {
            dw0 = BRW_VB0_ACCESS_VERTEXDATA | (i << BRW_VB0_INDEX_SHIFT);
         }

         if (intel->gen >= 7)
            dw0 |= GEN7_VB0_ADDRESS_MODIFYENABLE;

         OUT_BATCH(dw0 | (buffer->stride << BRW_VB0_PITCH_SHIFT));
         OUT_RELOC(buffer->bo, I915_GEM_DOMAIN_VERTEX, 0, buffer->offset);
         if (intel->gen >= 5) {
            OUT_RELOC(buffer->bo, I915_GEM_DOMAIN_VERTEX, 0, buffer->bo->size - 1);
         } else {
            OUT_BATCH(0);
         }
         OUT_BATCH(0); /* Instance data step rate */

         brw->vb.current_buffers[i].handle = buffer->bo->handle;
         brw->vb.current_buffers[i].offset = buffer->offset;
         brw->vb.current_buffers[i].stride = buffer->stride;
      }
      brw->vb.nr_current_buffers = i;
      ADVANCE_BATCH();
   }
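
   /* Each buffer occupies four DWords in 3DSTATE_VERTEX_BUFFERS above: the
    * control word with pitch, the relocated start address, the end address
    * (gen5+) or zero, and the instance data step rate; hence the
    * 1 + 4*nr_buffers batch length, with limits of 17 vertex buffers
    * pre-gen6 and 33 on gen6+ per the asserts.
    */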
   nr_elements = brw->vb.nr_enabled + brw->vs.prog_data->uses_vertexid;

   /* The hardware allows one more VERTEX_ELEMENTS than VERTEX_BUFFERS,
    * presumably for VertexID/InstanceID.
    */
   if (intel->gen >= 6) {
      assert(nr_elements <= 34);
   } else {
      assert(nr_elements <= 18);
   }

   BEGIN_BATCH(1 + nr_elements * 2);
   OUT_BATCH((_3DSTATE_VERTEX_ELEMENTS << 16) | (2 * nr_elements - 1));
   for (i = 0; i < brw->vb.nr_enabled; i++) {
      struct brw_vertex_element *input = brw->vb.enabled[i];
      uint32_t format = get_surface_type(input->glarray->Type,
                                         input->glarray->Size,
                                         input->glarray->Format,
                                         input->glarray->Normalized,
                                         input->glarray->Integer);
      uint32_t comp0 = BRW_VE1_COMPONENT_STORE_SRC;
      uint32_t comp1 = BRW_VE1_COMPONENT_STORE_SRC;
      uint32_t comp2 = BRW_VE1_COMPONENT_STORE_SRC;
      uint32_t comp3 = BRW_VE1_COMPONENT_STORE_SRC;

      /* Deliberate case fall-through: components the array doesn't supply
       * are overridden with 0, and w defaults to 1.
       */
      switch (input->glarray->Size) {
      case 0: comp0 = BRW_VE1_COMPONENT_STORE_0;
      case 1: comp1 = BRW_VE1_COMPONENT_STORE_0;
      case 2: comp2 = BRW_VE1_COMPONENT_STORE_0;
      case 3: comp3 = input->glarray->Integer ? BRW_VE1_COMPONENT_STORE_1_INT
                                              : BRW_VE1_COMPONENT_STORE_1_FLT;
         break;
      }

      if (intel->gen >= 6) {
         OUT_BATCH((input->buffer << GEN6_VE0_INDEX_SHIFT) |
                   GEN6_VE0_VALID |
                   (format << BRW_VE0_FORMAT_SHIFT) |
                   (input->offset << BRW_VE0_SRC_OFFSET_SHIFT));
      } else {
         OUT_BATCH((input->buffer << BRW_VE0_INDEX_SHIFT) |
                   BRW_VE0_VALID |
                   (format << BRW_VE0_FORMAT_SHIFT) |
                   (input->offset << BRW_VE0_SRC_OFFSET_SHIFT));
      }

      if (intel->gen >= 5)
         OUT_BATCH((comp0 << BRW_VE1_COMPONENT_0_SHIFT) |
                   (comp1 << BRW_VE1_COMPONENT_1_SHIFT) |
                   (comp2 << BRW_VE1_COMPONENT_2_SHIFT) |
                   (comp3 << BRW_VE1_COMPONENT_3_SHIFT));
      else
         OUT_BATCH((comp0 << BRW_VE1_COMPONENT_0_SHIFT) |
                   (comp1 << BRW_VE1_COMPONENT_1_SHIFT) |
                   (comp2 << BRW_VE1_COMPONENT_2_SHIFT) |
                   (comp3 << BRW_VE1_COMPONENT_3_SHIFT) |
                   ((i * 4) << BRW_VE1_DST_OFFSET_SHIFT));
   }

   if (brw->vs.prog_data->uses_vertexid) {
      uint32_t dw0 = 0, dw1 = 0;

      dw1 = ((BRW_VE1_COMPONENT_STORE_VID << BRW_VE1_COMPONENT_0_SHIFT) |
             (BRW_VE1_COMPONENT_STORE_IID << BRW_VE1_COMPONENT_1_SHIFT) |
             (BRW_VE1_COMPONENT_STORE_PID << BRW_VE1_COMPONENT_2_SHIFT) |
             (BRW_VE1_COMPONENT_STORE_0 << BRW_VE1_COMPONENT_3_SHIFT));

      if (intel->gen >= 6) {
         dw0 |= GEN6_VE0_VALID;
      } else {
         dw0 |= BRW_VE0_VALID;
         dw1 |= (i * 4) << BRW_VE1_DST_OFFSET_SHIFT;
      }

      /* Note that for gl_VertexID, gl_InstanceID, and gl_PrimitiveID values,
       * the format is ignored and the value is always int.
       */
      OUT_BATCH(dw0);
      OUT_BATCH(dw1);
   }

   CACHED_BATCH();
}
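
/* For illustration (hypothetical attribute): in the element loop above, a
 * size-2 float array leaves comp0/comp1 as STORE_SRC while the switch
 * fall-through sets comp2 to STORE_0 and comp3 to STORE_1_FLT, so the
 * shader reads (x, y, 0.0, 1.0).
 */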
const struct brw_tracked_state brw_vertices = {
   .dirty = {
      .mesa = 0,
      .brw = BRW_NEW_BATCH | BRW_NEW_VERTICES,
      .cache = CACHE_NEW_VS_PROG,
   },
   .emit = brw_emit_vertices,
};
static void brw_upload_indices(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->intel.ctx;
   struct intel_context *intel = &brw->intel;
   const struct _mesa_index_buffer *index_buffer = brw->ib.ib;
   GLuint ib_size;
   drm_intel_bo *bo = NULL;
   struct gl_buffer_object *bufferobj;
   GLuint offset;
   GLuint ib_type_size;

   if (index_buffer == NULL)
      return;

   ib_type_size = get_size(index_buffer->type);
   ib_size = ib_type_size * index_buffer->count;
   bufferobj = index_buffer->obj;

   /* Turn into a proper VBO:
    */
   if (!_mesa_is_bufferobj(bufferobj)) {

      /* Get new bufferobj, offset:
       */
      intel_upload_data(&brw->intel, index_buffer->ptr, ib_size, ib_type_size,
                        &bo, &offset);
      brw->ib.start_vertex_offset = offset / ib_type_size;
   } else {
      offset = (GLuint) (unsigned long) index_buffer->ptr;

      /* If the index buffer isn't aligned to its element size, we have to
       * rebase it into a temporary.
       */
      if ((get_size(index_buffer->type) - 1) & offset) {
         GLubyte *map = ctx->Driver.MapBufferRange(ctx,
                                                   offset,
                                                   ib_size,
                                                   GL_MAP_READ_BIT,
                                                   bufferobj);

         intel_upload_data(&brw->intel, map, ib_size, ib_type_size,
                           &bo, &offset);
         brw->ib.start_vertex_offset = offset / ib_type_size;

         ctx->Driver.UnmapBuffer(ctx, bufferobj);
      } else {
         /* Use CMD_3D_PRIM's start_vertex_offset to avoid re-uploading
          * the index buffer state when we're just moving the start index
          * of our drawing.
          */
         brw->ib.start_vertex_offset = offset / ib_type_size;

         bo = intel_bufferobj_source(intel,
                                     intel_buffer_object(bufferobj),
                                     ib_type_size,
                                     &offset);
         drm_intel_bo_reference(bo);

         brw->ib.start_vertex_offset += offset / ib_type_size;
      }
   }

   if (brw->ib.bo != bo) {
      drm_intel_bo_unreference(brw->ib.bo);
      brw->ib.bo = bo;

      brw->state.dirty.brw |= BRW_NEW_INDEX_BUFFER;
   } else {
      drm_intel_bo_unreference(bo);
   }

   if (index_buffer->type != brw->ib.type) {
      brw->ib.type = index_buffer->type;
      brw->state.dirty.brw |= BRW_NEW_INDEX_BUFFER;
   }
}
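
/* Worked example (hypothetical numbers): with GL_UNSIGNED_SHORT indices in
 * a VBO and an indices pointer of (void *)6, offset = 6 is element-aligned,
 * so the buffer is referenced in place with start_vertex_offset = 3.  An
 * odd offset such as 5 would instead take the rebase path above and be
 * copied out through intel_upload_data().
 */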
const struct brw_tracked_state brw_indices = {
   .dirty = {
      .mesa = 0,
      .brw = BRW_NEW_INDICES,
      .cache = 0,
   },
   .emit = brw_upload_indices,
};
static void brw_emit_index_buffer(struct brw_context *brw)
{
   struct intel_context *intel = &brw->intel;
   const struct _mesa_index_buffer *index_buffer = brw->ib.ib;

   if (index_buffer == NULL)
      return;

   BEGIN_BATCH(3);
   OUT_BATCH(CMD_INDEX_BUFFER << 16 |
             /* cut index enable << 10 */
             get_index_type(index_buffer->type) << 8 |
             1);
   OUT_RELOC(brw->ib.bo,
             I915_GEM_DOMAIN_VERTEX, 0,
             0);
   OUT_RELOC(brw->ib.bo,
             I915_GEM_DOMAIN_VERTEX, 0,
             brw->ib.bo->size - 1);
   ADVANCE_BATCH();
}
const struct brw_tracked_state brw_index_buffer = {
   .dirty = {
      .mesa = 0,
      .brw = BRW_NEW_BATCH | BRW_NEW_INDEX_BUFFER,
      .cache = 0,
   },
   .emit = brw_emit_index_buffer,
};
,