/**************************************************************************
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include "main/glheader.h"
#include "main/bufferobj.h"
#include "main/context.h"
#include "main/enums.h"
#include "main/macros.h"

#include "brw_defines.h"
#include "brw_context.h"
#include "brw_state.h"

#include "intel_batchbuffer.h"
#include "intel_buffer_objects.h"
static GLuint double_types[5] = {
   0,
   BRW_SURFACEFORMAT_R64_FLOAT,
   BRW_SURFACEFORMAT_R64G64_FLOAT,
   BRW_SURFACEFORMAT_R64G64B64_FLOAT,
   BRW_SURFACEFORMAT_R64G64B64A64_FLOAT
};

static GLuint float_types[5] = {
   0,
   BRW_SURFACEFORMAT_R32_FLOAT,
   BRW_SURFACEFORMAT_R32G32_FLOAT,
   BRW_SURFACEFORMAT_R32G32B32_FLOAT,
   BRW_SURFACEFORMAT_R32G32B32A32_FLOAT
};

static GLuint half_float_types[5] = {
   0,
   BRW_SURFACEFORMAT_R16_FLOAT,
   BRW_SURFACEFORMAT_R16G16_FLOAT,
   BRW_SURFACEFORMAT_R16G16B16A16_FLOAT,
   BRW_SURFACEFORMAT_R16G16B16A16_FLOAT
};

static GLuint uint_types_norm[5] = {
   0,
   BRW_SURFACEFORMAT_R32_UNORM,
   BRW_SURFACEFORMAT_R32G32_UNORM,
   BRW_SURFACEFORMAT_R32G32B32_UNORM,
   BRW_SURFACEFORMAT_R32G32B32A32_UNORM
};

static GLuint uint_types_scale[5] = {
   0,
   BRW_SURFACEFORMAT_R32_USCALED,
   BRW_SURFACEFORMAT_R32G32_USCALED,
   BRW_SURFACEFORMAT_R32G32B32_USCALED,
   BRW_SURFACEFORMAT_R32G32B32A32_USCALED
};

static GLuint int_types_norm[5] = {
   0,
   BRW_SURFACEFORMAT_R32_SNORM,
   BRW_SURFACEFORMAT_R32G32_SNORM,
   BRW_SURFACEFORMAT_R32G32B32_SNORM,
   BRW_SURFACEFORMAT_R32G32B32A32_SNORM
};

static GLuint int_types_scale[5] = {
   0,
   BRW_SURFACEFORMAT_R32_SSCALED,
   BRW_SURFACEFORMAT_R32G32_SSCALED,
   BRW_SURFACEFORMAT_R32G32B32_SSCALED,
   BRW_SURFACEFORMAT_R32G32B32A32_SSCALED
};

static GLuint ushort_types_norm[5] = {
   0,
   BRW_SURFACEFORMAT_R16_UNORM,
   BRW_SURFACEFORMAT_R16G16_UNORM,
   BRW_SURFACEFORMAT_R16G16B16_UNORM,
   BRW_SURFACEFORMAT_R16G16B16A16_UNORM
};

static GLuint ushort_types_scale[5] = {
   0,
   BRW_SURFACEFORMAT_R16_USCALED,
   BRW_SURFACEFORMAT_R16G16_USCALED,
   BRW_SURFACEFORMAT_R16G16B16_USCALED,
   BRW_SURFACEFORMAT_R16G16B16A16_USCALED
};

static GLuint short_types_norm[5] = {
   0,
   BRW_SURFACEFORMAT_R16_SNORM,
   BRW_SURFACEFORMAT_R16G16_SNORM,
   BRW_SURFACEFORMAT_R16G16B16_SNORM,
   BRW_SURFACEFORMAT_R16G16B16A16_SNORM
};

static GLuint short_types_scale[5] = {
   0,
   BRW_SURFACEFORMAT_R16_SSCALED,
   BRW_SURFACEFORMAT_R16G16_SSCALED,
   BRW_SURFACEFORMAT_R16G16B16_SSCALED,
   BRW_SURFACEFORMAT_R16G16B16A16_SSCALED
};

static GLuint ubyte_types_norm[5] = {
   0,
   BRW_SURFACEFORMAT_R8_UNORM,
   BRW_SURFACEFORMAT_R8G8_UNORM,
   BRW_SURFACEFORMAT_R8G8B8_UNORM,
   BRW_SURFACEFORMAT_R8G8B8A8_UNORM
};

static GLuint ubyte_types_scale[5] = {
   0,
   BRW_SURFACEFORMAT_R8_USCALED,
   BRW_SURFACEFORMAT_R8G8_USCALED,
   BRW_SURFACEFORMAT_R8G8B8_USCALED,
   BRW_SURFACEFORMAT_R8G8B8A8_USCALED
};

static GLuint byte_types_norm[5] = {
   0,
   BRW_SURFACEFORMAT_R8_SNORM,
   BRW_SURFACEFORMAT_R8G8_SNORM,
   BRW_SURFACEFORMAT_R8G8B8_SNORM,
   BRW_SURFACEFORMAT_R8G8B8A8_SNORM
};

static GLuint byte_types_scale[5] = {
   0,
   BRW_SURFACEFORMAT_R8_SSCALED,
   BRW_SURFACEFORMAT_R8G8_SSCALED,
   BRW_SURFACEFORMAT_R8G8B8_SSCALED,
   BRW_SURFACEFORMAT_R8G8B8A8_SSCALED
};
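/* The tables above are indexed by the GL array's component count (1..4);
 * entry 0 is unused padding.  E.g. a size-3 GL_FLOAT attribute maps to
 * float_types[3] == BRW_SURFACEFORMAT_R32G32B32_FLOAT.  Note that
 * half_float_types[3] reuses the 4-component format; for size-3 arrays
 * the extra W component is overridden in the vertex element setup below.
 */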
/**
 * Given vertex array type/size/format/normalized info, return
 * the appropriate hardware surface type.
 * Format will be GL_RGBA or possibly GL_BGRA for GLubyte[4] color arrays.
 */
static GLuint get_surface_type( GLenum type, GLuint size,
                                GLenum format, bool normalized )
{
   if (unlikely(INTEL_DEBUG & DEBUG_VERTS))
      printf("type %s size %d normalized %d\n",
             _mesa_lookup_enum_by_nr(type), size, normalized);

   if (normalized) {
      switch (type) {
      case GL_DOUBLE: return double_types[size];
      case GL_FLOAT: return float_types[size];
      case GL_HALF_FLOAT: return half_float_types[size];
      case GL_INT: return int_types_norm[size];
      case GL_SHORT: return short_types_norm[size];
      case GL_BYTE: return byte_types_norm[size];
      case GL_UNSIGNED_INT: return uint_types_norm[size];
      case GL_UNSIGNED_SHORT: return ushort_types_norm[size];
      case GL_UNSIGNED_BYTE:
         if (format == GL_BGRA) {
            /* See GL_EXT_vertex_array_bgra */
            assert(size == 4);
            return BRW_SURFACEFORMAT_B8G8R8A8_UNORM;
         }
         else {
            return ubyte_types_norm[size];
         }
      default: assert(0); return 0;
      }
   }
   else {
      assert(format == GL_RGBA); /* sanity check */
      switch (type) {
      case GL_DOUBLE: return double_types[size];
      case GL_FLOAT: return float_types[size];
      case GL_HALF_FLOAT: return half_float_types[size];
      case GL_INT: return int_types_scale[size];
      case GL_SHORT: return short_types_scale[size];
      case GL_BYTE: return byte_types_scale[size];
      case GL_UNSIGNED_INT: return uint_types_scale[size];
      case GL_UNSIGNED_SHORT: return ushort_types_scale[size];
      case GL_UNSIGNED_BYTE: return ubyte_types_scale[size];
      /* This produces GL_FIXED inputs as values between INT32_MIN and
       * INT32_MAX, which will be scaled down by 1/65536 by the VS.
       */
      case GL_FIXED: return int_types_scale[size];
      default: assert(0); return 0;
      }
   }
}
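/* Example (hypothetical attribute): a glVertexAttribPointer(attr, 3,
 * GL_FLOAT, GL_FALSE, stride, ptr) array resolves via
 * get_surface_type(GL_FLOAT, 3, GL_RGBA, false) to
 * BRW_SURFACEFORMAT_R32G32B32_FLOAT, while a normalized
 * GL_UNSIGNED_BYTE/GL_BGRA color array (GL_EXT_vertex_array_bgra)
 * resolves to BRW_SURFACEFORMAT_B8G8R8A8_UNORM.
 */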
static GLuint get_size( GLenum type )
{
   switch (type) {
   case GL_DOUBLE: return sizeof(GLdouble);
   case GL_FLOAT: return sizeof(GLfloat);
   case GL_HALF_FLOAT: return sizeof(GLhalfARB);
   case GL_INT: return sizeof(GLint);
   case GL_SHORT: return sizeof(GLshort);
   case GL_BYTE: return sizeof(GLbyte);
   case GL_UNSIGNED_INT: return sizeof(GLuint);
   case GL_UNSIGNED_SHORT: return sizeof(GLushort);
   case GL_UNSIGNED_BYTE: return sizeof(GLubyte);
   case GL_FIXED: return sizeof(GLuint);
   default: assert(0); return 0;
   }
}
static GLuint get_index_type(GLenum type)
{
   switch (type) {
   case GL_UNSIGNED_BYTE:  return BRW_INDEX_BYTE;
   case GL_UNSIGNED_SHORT: return BRW_INDEX_WORD;
   case GL_UNSIGNED_INT:   return BRW_INDEX_DWORD;
   default: assert(0); return 0;
   }
}
static void
copy_array_to_vbo_array(struct brw_context *brw,
                        struct brw_vertex_element *element,
                        int min, int max,
                        struct brw_vertex_buffer *buffer,
                        GLuint dst_stride)
{
   if (min == -1) {
      /* If we don't have computed min/max bounds, then this must be a use of
       * the current attribute, which has a 0 stride. Otherwise, we wouldn't
       * know what data to upload.
       */
      assert(element->glarray->StrideB == 0);

      intel_upload_data(&brw->intel, element->glarray->Ptr,
                        element->element_size,
                        element->element_size,
                        &buffer->bo, &buffer->offset);

      buffer->stride = 0;
      return;
   }
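   /* The path above is reached e.g. for a constant attribute such as a
    * glColor4f() color with the color array disabled: StrideB is 0, one
    * element's worth of data is uploaded, and the zero pitch makes the
    * hardware fetch the same values for every vertex.
    */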
   int src_stride = element->glarray->StrideB;
   const unsigned char *src = element->glarray->Ptr + min * src_stride;
   int count = max - min + 1;
   GLuint size = count * dst_stride;

   if (dst_stride == src_stride) {
      intel_upload_data(&brw->intel, src, size, dst_stride,
                        &buffer->bo, &buffer->offset);
   } else {
      char * const map = intel_upload_map(&brw->intel, size, dst_stride);
      char *dst = map;

      while (count--) {
         memcpy(dst, src, dst_stride);
         src += src_stride;
         dst += dst_stride;
      }
      intel_upload_unmap(&brw->intel, map, size, dst_stride,
                         &buffer->bo, &buffer->offset);
   }
   buffer->stride = dst_stride;
}
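/* Worked example (hypothetical strides): pulling a 12-byte vec3 float
 * element out of a user array with StrideB == 20 takes the memcpy path
 * above, copying dst_stride == 12 bytes per vertex into a tightly packed
 * upload buffer, for count == max - min + 1 vertices.
 */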
static void brw_prepare_vertices(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->intel.ctx;
   struct intel_context *intel = intel_context(ctx);
   /* CACHE_NEW_VS_PROG */
   GLbitfield vs_inputs = brw->vs.prog_data->inputs_read;
   const unsigned char *ptr = NULL;
   GLuint interleaved = 0, total_size = 0;
   unsigned int min_index = brw->vb.min_index;
   unsigned int max_index = brw->vb.max_index;
   int delta, i, j;

   struct brw_vertex_element *upload[VERT_ATTRIB_MAX];
   GLuint nr_uploads = 0;
   /* First build an array of pointers to ve's in vb.inputs_read
    */
   if (0)
      printf("%s %d..%d\n", __FUNCTION__, min_index, max_index);

   /* Accumulate the list of enabled arrays. */
   brw->vb.nr_enabled = 0;
   while (vs_inputs) {
      GLuint i = ffs(vs_inputs) - 1;
      struct brw_vertex_element *input = &brw->vb.inputs[i];

      vs_inputs &= ~(1 << i);
      if (input->glarray->Size && get_size(input->glarray->Type))
         brw->vb.enabled[brw->vb.nr_enabled++] = input;
   }

   if (brw->vb.nr_enabled == 0)
      return;
   if (brw->vb.nr_buffers)
      goto validate;

   /* XXX: In the rare cases where this happens we fall back all
    * the way to software rasterization, although a tnl fallback
    * would be sufficient.  I don't know of *any* real world
    * cases with > 17 vertex attributes enabled, so it probably
    * isn't an issue at this point.
    */
   if (brw->vb.nr_enabled >= BRW_VEP_MAX) {
      intel->Fallback = true; /* boolean, not bitfield */
      return;
   }
   for (i = j = 0; i < brw->vb.nr_enabled; i++) {
      struct brw_vertex_element *input = brw->vb.enabled[i];
      const struct gl_client_array *glarray = input->glarray;
      int type_size = get_size(glarray->Type);

      input->element_size = type_size * glarray->Size;

      if (_mesa_is_bufferobj(glarray->BufferObj)) {
         struct intel_buffer_object *intel_buffer =
            intel_buffer_object(glarray->BufferObj);
         int k;
         for (k = 0; k < i; k++) {
            const struct gl_client_array *other = brw->vb.enabled[k]->glarray;
            if (glarray->BufferObj == other->BufferObj &&
                glarray->StrideB == other->StrideB &&
                (uintptr_t)(glarray->Ptr - other->Ptr) < glarray->StrideB)
            {
               input->buffer = brw->vb.enabled[k]->buffer;
               input->offset = glarray->Ptr - other->Ptr;
               break;
            }
         }
         if (k == i) {
            struct brw_vertex_buffer *buffer = &brw->vb.buffers[j];

            /* Named buffer object: Just reference its contents directly. */
            buffer->bo = intel_bufferobj_source(intel,
                                                intel_buffer, type_size,
                                                &buffer->offset);
            drm_intel_bo_reference(buffer->bo);
            buffer->offset += (uintptr_t)glarray->Ptr;
            buffer->stride = glarray->StrideB;

            input->buffer = j++;
            input->offset = 0;
         }
         /* This is a common place to reach if the user mistakenly supplies
          * a pointer in place of a VBO offset. If we just let it go through,
          * we may end up dereferencing a pointer beyond the bounds of the
          * GTT. We would hope that the VBO's max_index would save us, but
          * Mesa appears to hand us min/max values not clipped to the
          * array object's _MaxElement, and _MaxElement frequently appears
          * to be wrong anyway.
          *
          * The VBO spec allows application termination in this case, and it's
          * probably a service to the poor programmer to do so rather than
          * trying to just not render.
          */
         assert(input->offset < brw->vb.buffers[input->buffer].bo->size);
      } else {
         /* Queue the buffer object up to be uploaded in the next pass,
          * when we've decided if we're doing interleaved or not.
          */
         if (nr_uploads == 0) {
            /* Position array not properly enabled:
             */
            if (input->attrib == VERT_ATTRIB_POS && glarray->StrideB == 0) {
               intel->Fallback = true; /* boolean, not bitfield */
               return;
            }

            interleaved = glarray->StrideB;
            ptr = glarray->Ptr;
         }
         else if (interleaved != glarray->StrideB ||
                  (uintptr_t)(glarray->Ptr - ptr) > interleaved)
         {
            interleaved = 0;
         }
         else if ((uintptr_t)(glarray->Ptr - ptr) & (type_size - 1))
         {
            /* enforce natural alignment (for doubles) */
            interleaved = 0;
         }

         upload[nr_uploads++] = input;
         total_size = ALIGN(total_size, type_size);
         total_size += input->element_size;
      }
   }
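   /* At this point `interleaved` is non-zero only if every user-space
    * array seen so far shares a single stride and starts within one
    * stride of the first array's base pointer -- i.e. the arrays look
    * like fields of one interleaved vertex record.
    */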
   /* If we need to upload all the arrays, then we can trim those arrays to
    * only the used elements [min_index, max_index] so long as we adjust all
    * the values used in the 3DPRIMITIVE i.e. by setting the vertex bias.
    */
   brw->vb.start_vertex_bias = 0;
   delta = min_index;
   if (nr_uploads == brw->vb.nr_enabled) {
      brw->vb.start_vertex_bias = -delta;
      delta = 0;
   }
   if (delta && !brw->intel.intelScreen->relaxed_relocations)
      min_index = delta = 0;
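   /* Example (hypothetical numbers): if every enabled array is uploaded
    * and min_index == 100, the arrays are trimmed to start at element 100
    * and start_vertex_bias becomes -100, so the indices used by the
    * 3DPRIMITIVE still land on the right elements of the trimmed copy.
    */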
   /* Handle any arrays to be uploaded. */
   if (nr_uploads > 1) {
      if (interleaved && interleaved <= 2*total_size) {
         struct brw_vertex_buffer *buffer = &brw->vb.buffers[j];
         /* All uploads are interleaved, so upload the arrays together as
          * interleaved.  First, upload the contents and set up upload[0].
          */
         copy_array_to_vbo_array(brw, upload[0], min_index, max_index,
                                 buffer, interleaved);
         buffer->offset -= delta * interleaved;

         for (i = 0; i < nr_uploads; i++) {
            /* Then, just point upload[i] at upload[0]'s buffer. */
            upload[i]->offset =
               ((const unsigned char *)upload[i]->glarray->Ptr - ptr);
            upload[i]->buffer = j;
         }
         j++;

         nr_uploads = 0;
      }
      else if (total_size < 2048) {
         /* Upload non-interleaved arrays into a single interleaved array */
         struct brw_vertex_buffer *buffer;
         int count = MAX2(max_index - min_index + 1, 1);
         int offset;
         char *map;

         map = intel_upload_map(&brw->intel, total_size * count, total_size);
         for (i = offset = 0; i < nr_uploads; i++) {
            const unsigned char *src = upload[i]->glarray->Ptr;
            int size = upload[i]->element_size;
            int stride = upload[i]->glarray->StrideB;
            char *dst;
            int n;

            offset = ALIGN(offset, get_size(upload[i]->glarray->Type));
            dst = map + offset;
            src += min_index * stride;

            for (n = 0; n < count; n++) {
               memcpy(dst, src, size);
               src += stride;
               dst += total_size;
            }

            upload[i]->offset = offset;
            upload[i]->buffer = j;

            offset += size;
         }
         assert(offset == total_size);
         buffer = &brw->vb.buffers[j++];
         intel_upload_unmap(&brw->intel, map, offset * count, offset,
                            &buffer->bo, &buffer->offset);
         buffer->stride = offset;
         buffer->offset -= delta * offset;

         nr_uploads = 0;
      }
   }
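   /* Sketch of the packing above (hypothetical layout): with a vec3 float
    * position and a vec2 float texcoord, total_size == 20; position lands
    * at offset 0 and texcoord at offset 12 of each 20-byte record, and
    * both elements share the single vertex buffer slot j.
    */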
   /* Upload non-interleaved arrays */
   for (i = 0; i < nr_uploads; i++) {
      struct brw_vertex_buffer *buffer = &brw->vb.buffers[j];
      copy_array_to_vbo_array(brw, upload[i], min_index, max_index,
                              buffer, upload[i]->element_size);
      buffer->offset -= delta * buffer->stride;
      upload[i]->buffer = j++;
      upload[i]->offset = 0;
   }
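   /* If only the buffer offsets advanced since the last draw, and they all
    * advanced by the same whole number of strides, the vertex buffer
    * packets already in the batch still apply: the block below just bumps
    * start_vertex_bias and drops the extra references instead of
    * re-emitting state.
    */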
   /* can we simply extend the current vb? */
   if (j == brw->vb.nr_current_buffers) {
      int delta = 0;
      for (i = 0; i < j; i++) {
         int d;

         if (brw->vb.current_buffers[i].handle != brw->vb.buffers[i].bo->handle ||
             brw->vb.current_buffers[i].stride != brw->vb.buffers[i].stride)
            break;

         d = brw->vb.buffers[i].offset - brw->vb.current_buffers[i].offset;
         if (d < 0)
            break;
         if (i == 0)
            delta = d / brw->vb.current_buffers[i].stride;
         if (delta * brw->vb.current_buffers[i].stride != d)
            break;
      }

      if (i == j) {
         brw->vb.start_vertex_bias += delta;
         while (--j >= 0)
            drm_intel_bo_unreference(brw->vb.buffers[j].bo);
         return;
      }
   }

   brw->vb.nr_buffers = j;
validate:
   brw_prepare_query_begin(brw);
   for (i = 0; i < brw->vb.nr_buffers; i++) {
      brw_add_validated_bo(brw, brw->vb.buffers[i].bo);
   }
}
static void brw_emit_vertices(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->intel.ctx;
   struct intel_context *intel = intel_context(ctx);
   GLuint i;

   brw_emit_query_begin(brw);

   /* If the VS doesn't read any inputs (calculating vertex position from
    * a state variable for some reason, for example), emit a single pad
    * VERTEX_ELEMENT struct and bail.
    *
    * The stale VB state stays in place, but the buffers don't do anything
    * unless a VE loads from them.
    */
   if (brw->vb.nr_enabled == 0) {
      BEGIN_BATCH(3);
      OUT_BATCH((_3DSTATE_VERTEX_ELEMENTS << 16) | 1);
      if (intel->gen >= 6) {
         OUT_BATCH((0 << GEN6_VE0_INDEX_SHIFT) |
                   GEN6_VE0_VALID |
                   (BRW_SURFACEFORMAT_R32G32B32A32_FLOAT << BRW_VE0_FORMAT_SHIFT) |
                   (0 << BRW_VE0_SRC_OFFSET_SHIFT));
      } else {
         OUT_BATCH((0 << BRW_VE0_INDEX_SHIFT) |
                   BRW_VE0_VALID |
                   (BRW_SURFACEFORMAT_R32G32B32A32_FLOAT << BRW_VE0_FORMAT_SHIFT) |
                   (0 << BRW_VE0_SRC_OFFSET_SHIFT));
      }
      OUT_BATCH((BRW_VE1_COMPONENT_STORE_0 << BRW_VE1_COMPONENT_0_SHIFT) |
                (BRW_VE1_COMPONENT_STORE_0 << BRW_VE1_COMPONENT_1_SHIFT) |
                (BRW_VE1_COMPONENT_STORE_0 << BRW_VE1_COMPONENT_2_SHIFT) |
                (BRW_VE1_COMPONENT_STORE_1_FLT << BRW_VE1_COMPONENT_3_SHIFT));
      CACHED_BATCH();
      return;
   }
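   /* The pad element above fetches nothing: all four components come from
    * the STORE_0/STORE_1_FLT component controls, so a VS input that is
    * accidentally read resolves to (0, 0, 0, 1.0f).
    */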
   /* Now emit VB and VEP state packets.
    */

   if (brw->vb.nr_buffers) {
      BEGIN_BATCH(1 + 4*brw->vb.nr_buffers);
      OUT_BATCH((_3DSTATE_VERTEX_BUFFERS << 16) | (4*brw->vb.nr_buffers - 1));
      for (i = 0; i < brw->vb.nr_buffers; i++) {
         struct brw_vertex_buffer *buffer = &brw->vb.buffers[i];
         uint32_t dw0;

         if (intel->gen >= 6) {
            dw0 = GEN6_VB0_ACCESS_VERTEXDATA | (i << GEN6_VB0_INDEX_SHIFT);
         } else {
            dw0 = BRW_VB0_ACCESS_VERTEXDATA | (i << BRW_VB0_INDEX_SHIFT);
         }

         if (intel->gen >= 7)
            dw0 |= GEN7_VB0_ADDRESS_MODIFYENABLE;

         OUT_BATCH(dw0 | (buffer->stride << BRW_VB0_PITCH_SHIFT));
         OUT_RELOC(buffer->bo, I915_GEM_DOMAIN_VERTEX, 0, buffer->offset);
         if (intel->gen >= 5) {
            OUT_RELOC(buffer->bo, I915_GEM_DOMAIN_VERTEX, 0, buffer->bo->size - 1);
         } else {
            OUT_BATCH(0);
         }
         OUT_BATCH(0); /* Instance data step rate */

         brw->vb.current_buffers[i].handle = buffer->bo->handle;
         brw->vb.current_buffers[i].offset = buffer->offset;
         brw->vb.current_buffers[i].stride = buffer->stride;
      }
      brw->vb.nr_current_buffers = i;
      ADVANCE_BATCH();
   }
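   /* Each vertex buffer occupies four DWords (state dword, start address,
    * end address or zero, instance step rate), hence the packet length
    * field of 4*nr_buffers - 1 and the 1 + 4*nr_buffers DWords reserved
    * above.
    */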
   BEGIN_BATCH(1 + brw->vb.nr_enabled * 2);
   OUT_BATCH((_3DSTATE_VERTEX_ELEMENTS << 16) | (2*brw->vb.nr_enabled - 1));
   for (i = 0; i < brw->vb.nr_enabled; i++) {
      struct brw_vertex_element *input = brw->vb.enabled[i];
      uint32_t format = get_surface_type(input->glarray->Type,
                                         input->glarray->Size,
                                         input->glarray->Format,
                                         input->glarray->Normalized);
      uint32_t comp0 = BRW_VE1_COMPONENT_STORE_SRC;
      uint32_t comp1 = BRW_VE1_COMPONENT_STORE_SRC;
      uint32_t comp2 = BRW_VE1_COMPONENT_STORE_SRC;
      uint32_t comp3 = BRW_VE1_COMPONENT_STORE_SRC;

      switch (input->glarray->Size) {
      case 0: comp0 = BRW_VE1_COMPONENT_STORE_0;
      case 1: comp1 = BRW_VE1_COMPONENT_STORE_0;
      case 2: comp2 = BRW_VE1_COMPONENT_STORE_0;
      case 3: comp3 = BRW_VE1_COMPONENT_STORE_1_FLT;
         break;
      }

      if (intel->gen >= 6) {
         OUT_BATCH((input->buffer << GEN6_VE0_INDEX_SHIFT) |
                   GEN6_VE0_VALID |
                   (format << BRW_VE0_FORMAT_SHIFT) |
                   (input->offset << BRW_VE0_SRC_OFFSET_SHIFT));
      } else {
         OUT_BATCH((input->buffer << BRW_VE0_INDEX_SHIFT) |
                   BRW_VE0_VALID |
                   (format << BRW_VE0_FORMAT_SHIFT) |
                   (input->offset << BRW_VE0_SRC_OFFSET_SHIFT));
      }

      if (intel->gen >= 5)
         OUT_BATCH((comp0 << BRW_VE1_COMPONENT_0_SHIFT) |
                   (comp1 << BRW_VE1_COMPONENT_1_SHIFT) |
                   (comp2 << BRW_VE1_COMPONENT_2_SHIFT) |
                   (comp3 << BRW_VE1_COMPONENT_3_SHIFT));
      else
         OUT_BATCH((comp0 << BRW_VE1_COMPONENT_0_SHIFT) |
                   (comp1 << BRW_VE1_COMPONENT_1_SHIFT) |
                   (comp2 << BRW_VE1_COMPONENT_2_SHIFT) |
                   (comp3 << BRW_VE1_COMPONENT_3_SHIFT) |
                   ((i * 4) << BRW_VE1_DST_OFFSET_SHIFT));
   }
   CACHED_BATCH();
}
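/* Note the deliberate fall-through in the Size switch above: e.g. a
 * size-2 array leaves comp0/comp1 as STORE_SRC, then sets comp2 to
 * STORE_0 and comp3 to STORE_1_FLT, so the shader reads (x, y, 0, 1.0f)
 * for the missing components.
 */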
const struct brw_tracked_state brw_vertices = {
   .dirty = {
      .mesa = 0,
      .brw = BRW_NEW_BATCH | BRW_NEW_VERTICES,
      .cache = CACHE_NEW_VS_PROG,
   },
   .prepare = brw_prepare_vertices,
   .emit = brw_emit_vertices,
};
static void brw_prepare_indices(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->intel.ctx;
   struct intel_context *intel = &brw->intel;
   const struct _mesa_index_buffer *index_buffer = brw->ib.ib;
   GLuint ib_size;
   drm_intel_bo *bo = NULL;
   struct gl_buffer_object *bufferobj;
   GLuint offset;
   GLuint ib_type_size;

   if (index_buffer == NULL)
      return;

   ib_type_size = get_size(index_buffer->type);
   ib_size = ib_type_size * index_buffer->count;
   bufferobj = index_buffer->obj;
   /* Turn into a proper VBO:
    */
   if (!_mesa_is_bufferobj(bufferobj)) {

      /* Get new bufferobj, offset:
       */
      intel_upload_data(&brw->intel, index_buffer->ptr, ib_size, ib_type_size,
                        &bo, &offset);
      brw->ib.start_vertex_offset = offset / ib_type_size;
   } else {
      offset = (GLuint) (unsigned long) index_buffer->ptr;
      /* If the index buffer isn't aligned to its element size, we have to
       * rebase it into a temporary.
       */
      if ((get_size(index_buffer->type) - 1) & offset) {
         GLubyte *map = ctx->Driver.MapBufferRange(ctx,
                                                   offset,
                                                   ib_size,
                                                   GL_MAP_READ_BIT,
                                                   bufferobj);

         intel_upload_data(&brw->intel, map, ib_size, ib_type_size,
                           &bo, &offset);
         brw->ib.start_vertex_offset = offset / ib_type_size;

         ctx->Driver.UnmapBuffer(ctx, bufferobj);
      } else {
         /* Use CMD_3D_PRIM's start_vertex_offset to avoid re-uploading
          * the index buffer state when we're just moving the start index
          * of our drawing.
          */
         brw->ib.start_vertex_offset = offset / ib_type_size;

         bo = intel_bufferobj_source(intel,
                                     intel_buffer_object(bufferobj),
                                     ib_type_size, &offset);
         drm_intel_bo_reference(bo);

         brw->ib.start_vertex_offset += offset / ib_type_size;
      }
   }
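   /* Example (hypothetical numbers): binding a GL_UNSIGNED_SHORT element
    * buffer at a 6-byte offset stays aligned ((2 - 1) & 6 == 0), so the
    * path above makes no copy; start_vertex_offset simply advances by
    * 6 / 2 == 3 indices and the INDEX_BUFFER packet is left untouched.
    */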
   if (brw->ib.bo != bo) {
      drm_intel_bo_unreference(brw->ib.bo);
      brw->ib.bo = bo;

      brw_add_validated_bo(brw, brw->ib.bo);
      brw->state.dirty.brw |= BRW_NEW_INDEX_BUFFER;
   } else {
      drm_intel_bo_unreference(bo);
   }

   if (index_buffer->type != brw->ib.type) {
      brw->ib.type = index_buffer->type;
      brw->state.dirty.brw |= BRW_NEW_INDEX_BUFFER;
   }
}
const struct brw_tracked_state brw_indices = {
   .dirty = {
      .mesa = 0,
      .brw = BRW_NEW_INDICES,
      .cache = 0,
   },
   .prepare = brw_prepare_indices,
};
static void brw_emit_index_buffer(struct brw_context *brw)
{
   struct intel_context *intel = &brw->intel;
   const struct _mesa_index_buffer *index_buffer = brw->ib.ib;

   if (index_buffer == NULL)
      return;

   BEGIN_BATCH(3);
   OUT_BATCH(CMD_INDEX_BUFFER << 16 |
             /* cut index enable << 10 */
             get_index_type(index_buffer->type) << 8 |
             1);
   OUT_RELOC(brw->ib.bo,
             I915_GEM_DOMAIN_VERTEX, 0,
             0);
   OUT_RELOC(brw->ib.bo,
             I915_GEM_DOMAIN_VERTEX, 0,
             brw->ib.bo->size - 1);
   ADVANCE_BATCH();
}
const struct brw_tracked_state brw_index_buffer = {
   .dirty = {
      .mesa = 0,
      .brw = BRW_NEW_BATCH | BRW_NEW_INDEX_BUFFER,
      .cache = 0,
   },
   .emit = brw_emit_index_buffer,
};