/**************************************************************************
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include "main/glheader.h"
#include "main/bufferobj.h"
#include "main/context.h"
#include "main/enums.h"

#include "brw_defines.h"
#include "brw_context.h"
#include "brw_state.h"

#include "intel_batchbuffer.h"
#include "intel_buffer_objects.h"
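
/* Note (added commentary, not in the original source): the surface format
 * tables below are indexed by the GL array's component count, Size (1..4),
 * so entry 0 is an unused placeholder.
 */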
static GLuint double_types[5] = {
   0,
   BRW_SURFACEFORMAT_R64_FLOAT,
   BRW_SURFACEFORMAT_R64G64_FLOAT,
   BRW_SURFACEFORMAT_R64G64B64_FLOAT,
   BRW_SURFACEFORMAT_R64G64B64A64_FLOAT
};

static GLuint float_types[5] = {
   0,
   BRW_SURFACEFORMAT_R32_FLOAT,
   BRW_SURFACEFORMAT_R32G32_FLOAT,
   BRW_SURFACEFORMAT_R32G32B32_FLOAT,
   BRW_SURFACEFORMAT_R32G32B32A32_FLOAT
};

static GLuint half_float_types[5] = {
   0,
   BRW_SURFACEFORMAT_R16_FLOAT,
   BRW_SURFACEFORMAT_R16G16_FLOAT,
   /* There is no three-component 16-bit float surface format, so size-3
    * half-float arrays reuse the four-component format.
    */
   BRW_SURFACEFORMAT_R16G16B16A16_FLOAT,
   BRW_SURFACEFORMAT_R16G16B16A16_FLOAT
};

static GLuint uint_types_norm[5] = {
   0,
   BRW_SURFACEFORMAT_R32_UNORM,
   BRW_SURFACEFORMAT_R32G32_UNORM,
   BRW_SURFACEFORMAT_R32G32B32_UNORM,
   BRW_SURFACEFORMAT_R32G32B32A32_UNORM
};

static GLuint uint_types_scale[5] = {
   0,
   BRW_SURFACEFORMAT_R32_USCALED,
   BRW_SURFACEFORMAT_R32G32_USCALED,
   BRW_SURFACEFORMAT_R32G32B32_USCALED,
   BRW_SURFACEFORMAT_R32G32B32A32_USCALED
};

static GLuint int_types_norm[5] = {
   0,
   BRW_SURFACEFORMAT_R32_SNORM,
   BRW_SURFACEFORMAT_R32G32_SNORM,
   BRW_SURFACEFORMAT_R32G32B32_SNORM,
   BRW_SURFACEFORMAT_R32G32B32A32_SNORM
};

static GLuint int_types_scale[5] = {
   0,
   BRW_SURFACEFORMAT_R32_SSCALED,
   BRW_SURFACEFORMAT_R32G32_SSCALED,
   BRW_SURFACEFORMAT_R32G32B32_SSCALED,
   BRW_SURFACEFORMAT_R32G32B32A32_SSCALED
};

static GLuint ushort_types_norm[5] = {
   0,
   BRW_SURFACEFORMAT_R16_UNORM,
   BRW_SURFACEFORMAT_R16G16_UNORM,
   BRW_SURFACEFORMAT_R16G16B16_UNORM,
   BRW_SURFACEFORMAT_R16G16B16A16_UNORM
};

static GLuint ushort_types_scale[5] = {
   0,
   BRW_SURFACEFORMAT_R16_USCALED,
   BRW_SURFACEFORMAT_R16G16_USCALED,
   BRW_SURFACEFORMAT_R16G16B16_USCALED,
   BRW_SURFACEFORMAT_R16G16B16A16_USCALED
};

static GLuint short_types_norm[5] = {
   0,
   BRW_SURFACEFORMAT_R16_SNORM,
   BRW_SURFACEFORMAT_R16G16_SNORM,
   BRW_SURFACEFORMAT_R16G16B16_SNORM,
   BRW_SURFACEFORMAT_R16G16B16A16_SNORM
};

static GLuint short_types_scale[5] = {
   0,
   BRW_SURFACEFORMAT_R16_SSCALED,
   BRW_SURFACEFORMAT_R16G16_SSCALED,
   BRW_SURFACEFORMAT_R16G16B16_SSCALED,
   BRW_SURFACEFORMAT_R16G16B16A16_SSCALED
};

static GLuint ubyte_types_norm[5] = {
   0,
   BRW_SURFACEFORMAT_R8_UNORM,
   BRW_SURFACEFORMAT_R8G8_UNORM,
   BRW_SURFACEFORMAT_R8G8B8_UNORM,
   BRW_SURFACEFORMAT_R8G8B8A8_UNORM
};

static GLuint ubyte_types_scale[5] = {
   0,
   BRW_SURFACEFORMAT_R8_USCALED,
   BRW_SURFACEFORMAT_R8G8_USCALED,
   BRW_SURFACEFORMAT_R8G8B8_USCALED,
   BRW_SURFACEFORMAT_R8G8B8A8_USCALED
};

static GLuint byte_types_norm[5] = {
   0,
   BRW_SURFACEFORMAT_R8_SNORM,
   BRW_SURFACEFORMAT_R8G8_SNORM,
   BRW_SURFACEFORMAT_R8G8B8_SNORM,
   BRW_SURFACEFORMAT_R8G8B8A8_SNORM
};

static GLuint byte_types_scale[5] = {
   0,
   BRW_SURFACEFORMAT_R8_SSCALED,
   BRW_SURFACEFORMAT_R8G8_SSCALED,
   BRW_SURFACEFORMAT_R8G8B8_SSCALED,
   BRW_SURFACEFORMAT_R8G8B8A8_SSCALED
};

/**
 * Given vertex array type/size/format/normalized info, return
 * the appropriate hardware surface type.
 * Format will be GL_RGBA or possibly GL_BGRA for GLubyte[4] color arrays.
 */
static GLuint get_surface_type( GLenum type, GLuint size,
                                GLenum format, GLboolean normalized )
{
   if (unlikely(INTEL_DEBUG & DEBUG_VERTS))
      printf("type %s size %d normalized %d\n",
             _mesa_lookup_enum_by_nr(type), size, normalized);

   if (normalized) {
      switch (type) {
      case GL_DOUBLE: return double_types[size];
      case GL_FLOAT: return float_types[size];
      case GL_HALF_FLOAT: return half_float_types[size];
      case GL_INT: return int_types_norm[size];
      case GL_SHORT: return short_types_norm[size];
      case GL_BYTE: return byte_types_norm[size];
      case GL_UNSIGNED_INT: return uint_types_norm[size];
      case GL_UNSIGNED_SHORT: return ushort_types_norm[size];
      case GL_UNSIGNED_BYTE:
         if (format == GL_BGRA) {
            /* See GL_EXT_vertex_array_bgra */
            assert(size == 4);
            return BRW_SURFACEFORMAT_B8G8R8A8_UNORM;
         }
         else {
            return ubyte_types_norm[size];
         }
      default: assert(0); return 0;
      }
   }
   else {
      assert(format == GL_RGBA); /* sanity check */
      switch (type) {
      case GL_DOUBLE: return double_types[size];
      case GL_FLOAT: return float_types[size];
      case GL_HALF_FLOAT: return half_float_types[size];
      case GL_INT: return int_types_scale[size];
      case GL_SHORT: return short_types_scale[size];
      case GL_BYTE: return byte_types_scale[size];
      case GL_UNSIGNED_INT: return uint_types_scale[size];
      case GL_UNSIGNED_SHORT: return ushort_types_scale[size];
      case GL_UNSIGNED_BYTE: return ubyte_types_scale[size];
      /* This produces GL_FIXED inputs as values between INT32_MIN and
       * INT32_MAX, which will be scaled down by 1/65536 by the VS.
       */
      case GL_FIXED: return int_types_scale[size];
      default: assert(0); return 0;
      }
   }
}
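
/* Illustrative example (added commentary, not called by the driver): a
 * normalized 4-component GLubyte color array in GL_RGBA order maps to
 *
 *    get_surface_type(GL_UNSIGNED_BYTE, 4, GL_RGBA, GL_TRUE)
 *       == BRW_SURFACEFORMAT_R8G8B8A8_UNORM
 *
 * while the same array in GL_BGRA order maps to
 * BRW_SURFACEFORMAT_B8G8R8A8_UNORM (see GL_EXT_vertex_array_bgra).
 */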
static GLuint get_size( GLenum type )
{
   switch (type) {
   case GL_DOUBLE: return sizeof(GLdouble);
   case GL_FLOAT: return sizeof(GLfloat);
   case GL_HALF_FLOAT: return sizeof(GLhalfARB);
   case GL_INT: return sizeof(GLint);
   case GL_SHORT: return sizeof(GLshort);
   case GL_BYTE: return sizeof(GLbyte);
   case GL_UNSIGNED_INT: return sizeof(GLuint);
   case GL_UNSIGNED_SHORT: return sizeof(GLushort);
   case GL_UNSIGNED_BYTE: return sizeof(GLubyte);
   case GL_FIXED: return sizeof(GLuint);
   default: assert(0); return 0;
   }
}
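
/* For example (added commentary), get_size(GL_FLOAT) == 4 and
 * get_size(GL_HALF_FLOAT) == 2, so a three-component half-float attribute
 * has element_size == 2 * 3 == 6 bytes in brw_prepare_vertices() below.
 */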
static GLuint get_index_type(GLenum type)
{
   switch (type) {
   case GL_UNSIGNED_BYTE: return BRW_INDEX_BYTE;
   case GL_UNSIGNED_SHORT: return BRW_INDEX_WORD;
   case GL_UNSIGNED_INT: return BRW_INDEX_DWORD;
   default: assert(0); return 0;
   }
}
static void
copy_array_to_vbo_array(struct brw_context *brw,
                        struct brw_vertex_element *element,
                        int min, int max,
                        struct brw_vertex_buffer *buffer,
                        GLuint dst_stride)
{
   int src_stride = element->glarray->StrideB;
   const unsigned char *src = element->glarray->Ptr + min * src_stride;
   int count = max - min + 1;
   GLuint size = count * dst_stride;

   if (dst_stride == src_stride) {
      intel_upload_data(&brw->intel, src, size, dst_stride,
                        &buffer->bo, &buffer->offset);
   } else {
      char * const map = intel_upload_map(&brw->intel, size, dst_stride);
      char *dst = map;

      while (count--) {
         memcpy(dst, src, dst_stride);
         src += src_stride;
         dst += dst_stride;
      }
      intel_upload_unmap(&brw->intel, map, size, dst_stride,
                         &buffer->bo, &buffer->offset);
   }
   buffer->stride = dst_stride;
}
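
/* Worked example (added commentary, illustrative only): a vec3 float
 * attribute (element_size 12) sourced from a user array with
 * StrideB == 32 takes the restride path above when called with
 * dst_stride == 12: each iteration memcpy()s 12 bytes and advances src
 * by 32 and dst by 12, so the upload covers (max - min + 1) * 12 bytes
 * instead of (max - min + 1) * 32.
 */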
static void brw_prepare_vertices(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->intel.ctx;
   struct intel_context *intel = intel_context(ctx);
   /* CACHE_NEW_VS_PROG */
   GLbitfield vs_inputs = brw->vs.prog_data->inputs_read;
   const unsigned char *ptr = NULL;
   GLuint interleaved = 0, total_size = 0;
   unsigned int min_index = brw->vb.min_index;
   unsigned int max_index = brw->vb.max_index;
   int delta, i, j;

   struct brw_vertex_element *upload[VERT_ATTRIB_MAX];
   GLuint nr_uploads = 0;

   /* First build an array of pointers to ve's in vb.inputs_read
    */
   if (0)
      printf("%s %d..%d\n", __FUNCTION__, min_index, max_index);

   /* Accumulate the list of enabled arrays. */
   brw->vb.nr_enabled = 0;
   while (vs_inputs) {
      GLuint i = ffs(vs_inputs) - 1;
      struct brw_vertex_element *input = &brw->vb.inputs[i];

      vs_inputs &= ~(1 << i);
      if (input->glarray->Size && get_size(input->glarray->Type))
         brw->vb.enabled[brw->vb.nr_enabled++] = input;
   }

   if (brw->vb.nr_enabled == 0)
      return;

   if (brw->vb.nr_buffers)
      goto validate;

   /* XXX: In the rare cases where this happens we fallback all
    * the way to software rasterization, although a tnl fallback
    * would be sufficient.  I don't know of *any* real world
    * cases with > 17 vertex attributes enabled, so it probably
    * isn't an issue at this point.
    */
   if (brw->vb.nr_enabled >= BRW_VEP_MAX) {
      intel->Fallback = GL_TRUE; /* boolean, not bitfield */
      return;
   }

   for (i = j = 0; i < brw->vb.nr_enabled; i++) {
      struct brw_vertex_element *input = brw->vb.enabled[i];
      const struct gl_client_array *glarray = input->glarray;
      int type_size = get_size(glarray->Type);

      input->element_size = type_size * glarray->Size;

      if (_mesa_is_bufferobj(glarray->BufferObj)) {
         struct intel_buffer_object *intel_buffer =
            intel_buffer_object(glarray->BufferObj);
         int k;

         for (k = 0; k < i; k++) {
            const struct gl_client_array *other = brw->vb.enabled[k]->glarray;
            if (glarray->BufferObj == other->BufferObj &&
                glarray->StrideB == other->StrideB &&
                (uintptr_t)(glarray->Ptr - other->Ptr) < glarray->StrideB)
            {
               input->buffer = brw->vb.enabled[k]->buffer;
               input->offset = glarray->Ptr - other->Ptr;
               break;
            }
         }
         if (k == i) {
            struct brw_vertex_buffer *buffer = &brw->vb.buffers[j];

            /* Named buffer object: Just reference its contents directly. */
            buffer->bo = intel_bufferobj_source(intel,
                                                intel_buffer, type_size,
                                                &buffer->offset);
            drm_intel_bo_reference(buffer->bo);
            buffer->offset += (uintptr_t)glarray->Ptr;
            buffer->stride = glarray->StrideB;

            input->buffer = j++;
            input->offset = 0;
         }

         /* This is a common place to reach if the user mistakenly supplies
          * a pointer in place of a VBO offset. If we just let it go through,
          * we may end up dereferencing a pointer beyond the bounds of the
          * GTT. We would hope that the VBO's max_index would save us, but
          * Mesa appears to hand us min/max values not clipped to the
          * array object's _MaxElement, and _MaxElement frequently appears
          * to be wrong anyway.
          *
          * The VBO spec allows application termination in this case, and it's
          * probably a service to the poor programmer to do so rather than
          * trying to just not render.
          */
         assert(input->offset < brw->vb.buffers[input->buffer].bo->size);
      } else {
         /* Queue the buffer object up to be uploaded in the next pass,
          * when we've decided if we're doing interleaved or not.
          */
         if (nr_uploads == 0) {
            /* Position array not properly enabled:
             */
            if (input->attrib == VERT_ATTRIB_POS && glarray->StrideB == 0) {
               intel->Fallback = GL_TRUE; /* boolean, not bitfield */
               return;
            }

            interleaved = glarray->StrideB;
            ptr = glarray->Ptr;
         }
         else if (interleaved != glarray->StrideB ||
                  (uintptr_t)(glarray->Ptr - ptr) > interleaved)
         {
            interleaved = 0;
         }
         else if ((uintptr_t)(glarray->Ptr - ptr) & (type_size -1))
         {
            /* enforce natural alignment (for doubles) */
            interleaved = 0;
         }

         upload[nr_uploads++] = input;
         total_size = ALIGN(total_size, type_size);
         total_size += input->element_size;
      }
   }

   /* If we need to upload all the arrays, then we can trim those arrays to
    * only the used elements [min_index, max_index] so long as we adjust all
    * the values used in the 3DPRIMITIVE i.e. by setting the vertex bias.
    */
   brw->vb.start_vertex_bias = 0;
   delta = min_index;
   if (nr_uploads == brw->vb.nr_enabled) {
      brw->vb.start_vertex_bias = -delta;
      delta = 0;
   }
   if (delta && !brw->intel.intelScreen->relaxed_relocations)
      min_index = delta = 0;

   /* Handle any arrays to be uploaded. */
   if (nr_uploads > 1) {
      if (interleaved && interleaved <= 2*total_size) {
         struct brw_vertex_buffer *buffer = &brw->vb.buffers[j];
         /* All uploads are interleaved, so upload the arrays together as
          * interleaved.  First, upload the contents and set up upload[0].
          */
         copy_array_to_vbo_array(brw, upload[0], min_index, max_index,
                                 buffer, interleaved);
         buffer->offset -= delta * interleaved;

         for (i = 0; i < nr_uploads; i++) {
            /* Then, just point upload[i] at upload[0]'s buffer. */
            upload[i]->offset =
               ((const unsigned char *)upload[i]->glarray->Ptr - ptr);
            upload[i]->buffer = j;
         }
         j++;

         nr_uploads = 0;
      }
      else if (total_size < 2048) {
         /* Upload non-interleaved arrays into a single interleaved array */
         struct brw_vertex_buffer *buffer;
         int count = max_index - min_index + 1;
         int offset;
         char *map;

         map = intel_upload_map(&brw->intel, total_size * count, total_size);
         for (i = offset = 0; i < nr_uploads; i++) {
            const unsigned char *src = upload[i]->glarray->Ptr;
            int size = upload[i]->element_size;
            int stride = upload[i]->glarray->StrideB;
            char *dst;
            int n;

            offset = ALIGN(offset, get_size(upload[i]->glarray->Type));
            dst = map + offset;
            src += min_index * stride;

            for (n = 0; n < count; n++) {
               memcpy(dst, src, size);
               src += stride;
               dst += total_size;
            }

            upload[i]->offset = offset;
            upload[i]->buffer = j;

            offset += size;
         }
         assert(offset == total_size);
         buffer = &brw->vb.buffers[j++];
         intel_upload_unmap(&brw->intel, map, offset * count, offset,
                            &buffer->bo, &buffer->offset);
         buffer->stride = offset;
         buffer->offset -= delta * offset;

         nr_uploads = 0;
      }
   }
   /* Upload non-interleaved arrays */
   for (i = 0; i < nr_uploads; i++) {
      struct brw_vertex_buffer *buffer = &brw->vb.buffers[j];
      copy_array_to_vbo_array(brw, upload[i], min_index, max_index,
                              buffer, upload[i]->element_size);
      buffer->offset -= delta * buffer->stride;
      upload[i]->buffer = j++;
      upload[i]->offset = 0;
   }

   /* can we simply extend the current vb? */
   if (j == brw->vb.nr_current_buffers) {
      int delta = 0;
      for (i = 0; i < j; i++) {
         int d;

         if (brw->vb.current_buffers[i].handle != brw->vb.buffers[i].bo->handle ||
             brw->vb.current_buffers[i].stride != brw->vb.buffers[i].stride)
            break;

         d = brw->vb.buffers[i].offset - brw->vb.current_buffers[i].offset;
         if (d < 0)
            break;
         if (i == 0)
            delta = d / brw->vb.current_buffers[i].stride;
         if (delta * brw->vb.current_buffers[i].stride != d)
            break;
      }

      if (i == j) {
         brw->vb.start_vertex_bias += delta;
         while (--j >= 0)
            drm_intel_bo_unreference(brw->vb.buffers[j].bo);
         j = 0;
      }
   }

   brw->vb.nr_buffers = j;

validate:
   brw_prepare_query_begin(brw);
   for (i = 0; i < brw->vb.nr_buffers; i++) {
      brw_add_validated_bo(brw, brw->vb.buffers[i].bo);
   }
}
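
/* Illustrative example of the vertex-bias bookkeeping above (added
 * commentary, not driver code): if every enabled array is a user-space
 * upload and the draw only uses indices in [100, 150], just elements
 * 100..150 are copied into the upload buffer. start_vertex_bias is then
 * set to -100, so after the 3DPRIMITIVE vertex offsets are biased, index
 * 100 lands at offset 0 of the trimmed upload.
 */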
static void brw_emit_vertices(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->intel.ctx;
   struct intel_context *intel = intel_context(ctx);
   GLuint i;

   brw_emit_query_begin(brw);

   /* If the VS doesn't read any inputs (calculating vertex position from
    * a state variable for some reason, for example), emit a single pad
    * VERTEX_ELEMENT struct and bail.
    *
    * The stale VB state stays in place, but they don't do anything unless
    * a VE loads from them.
    */
   if (brw->vb.nr_enabled == 0) {
      BEGIN_BATCH(3);
      OUT_BATCH((_3DSTATE_VERTEX_ELEMENTS << 16) | 1);
      if (intel->gen >= 6) {
         OUT_BATCH((0 << GEN6_VE0_INDEX_SHIFT) |
                   GEN6_VE0_VALID |
                   (BRW_SURFACEFORMAT_R32G32B32A32_FLOAT << BRW_VE0_FORMAT_SHIFT) |
                   (0 << BRW_VE0_SRC_OFFSET_SHIFT));
      } else {
         OUT_BATCH((0 << BRW_VE0_INDEX_SHIFT) |
                   BRW_VE0_VALID |
                   (BRW_SURFACEFORMAT_R32G32B32A32_FLOAT << BRW_VE0_FORMAT_SHIFT) |
                   (0 << BRW_VE0_SRC_OFFSET_SHIFT));
      }
      OUT_BATCH((BRW_VE1_COMPONENT_STORE_0 << BRW_VE1_COMPONENT_0_SHIFT) |
                (BRW_VE1_COMPONENT_STORE_0 << BRW_VE1_COMPONENT_1_SHIFT) |
                (BRW_VE1_COMPONENT_STORE_0 << BRW_VE1_COMPONENT_2_SHIFT) |
                (BRW_VE1_COMPONENT_STORE_1_FLT << BRW_VE1_COMPONENT_3_SHIFT));
      CACHED_BATCH();
      return;
   }

   /* Now emit VB and VEP state packets.
    */

   if (brw->vb.nr_buffers) {
      BEGIN_BATCH(1 + 4*brw->vb.nr_buffers);
      OUT_BATCH((_3DSTATE_VERTEX_BUFFERS << 16) | (4*brw->vb.nr_buffers - 1));
      for (i = 0; i < brw->vb.nr_buffers; i++) {
         struct brw_vertex_buffer *buffer = &brw->vb.buffers[i];
         uint32_t dw0;

         if (intel->gen >= 6) {
            dw0 = GEN6_VB0_ACCESS_VERTEXDATA | (i << GEN6_VB0_INDEX_SHIFT);
         } else {
            dw0 = BRW_VB0_ACCESS_VERTEXDATA | (i << BRW_VB0_INDEX_SHIFT);
         }

         if (intel->gen >= 7)
            dw0 |= GEN7_VB0_ADDRESS_MODIFYENABLE;

         OUT_BATCH(dw0 | (buffer->stride << BRW_VB0_PITCH_SHIFT));
         OUT_RELOC(buffer->bo, I915_GEM_DOMAIN_VERTEX, 0, buffer->offset);
         if (intel->gen >= 5) {
            OUT_RELOC(buffer->bo, I915_GEM_DOMAIN_VERTEX, 0, buffer->bo->size - 1);
         } else
            OUT_BATCH(0);
         OUT_BATCH(0); /* Instance data step rate */

         brw->vb.current_buffers[i].handle = buffer->bo->handle;
         brw->vb.current_buffers[i].offset = buffer->offset;
         brw->vb.current_buffers[i].stride = buffer->stride;
      }
      brw->vb.nr_current_buffers = i;
      ADVANCE_BATCH();
   }

   BEGIN_BATCH(1 + brw->vb.nr_enabled * 2);
   OUT_BATCH((_3DSTATE_VERTEX_ELEMENTS << 16) | (2*brw->vb.nr_enabled - 1));
   for (i = 0; i < brw->vb.nr_enabled; i++) {
      struct brw_vertex_element *input = brw->vb.enabled[i];
      uint32_t format = get_surface_type(input->glarray->Type,
                                         input->glarray->Size,
                                         input->glarray->Format,
                                         input->glarray->Normalized);
      uint32_t comp0 = BRW_VE1_COMPONENT_STORE_SRC;
      uint32_t comp1 = BRW_VE1_COMPONENT_STORE_SRC;
      uint32_t comp2 = BRW_VE1_COMPONENT_STORE_SRC;
      uint32_t comp3 = BRW_VE1_COMPONENT_STORE_SRC;

      switch (input->glarray->Size) {
      case 0: comp0 = BRW_VE1_COMPONENT_STORE_0;
      case 1: comp1 = BRW_VE1_COMPONENT_STORE_0;
      case 2: comp2 = BRW_VE1_COMPONENT_STORE_0;
      case 3: comp3 = BRW_VE1_COMPONENT_STORE_1_FLT;
         break;
      }

      if (intel->gen >= 6) {
         OUT_BATCH((input->buffer << GEN6_VE0_INDEX_SHIFT) |
                   GEN6_VE0_VALID |
                   (format << BRW_VE0_FORMAT_SHIFT) |
                   (input->offset << BRW_VE0_SRC_OFFSET_SHIFT));
      } else {
         OUT_BATCH((input->buffer << BRW_VE0_INDEX_SHIFT) |
                   BRW_VE0_VALID |
                   (format << BRW_VE0_FORMAT_SHIFT) |
                   (input->offset << BRW_VE0_SRC_OFFSET_SHIFT));
      }

      if (intel->gen >= 5)
         OUT_BATCH((comp0 << BRW_VE1_COMPONENT_0_SHIFT) |
                   (comp1 << BRW_VE1_COMPONENT_1_SHIFT) |
                   (comp2 << BRW_VE1_COMPONENT_2_SHIFT) |
                   (comp3 << BRW_VE1_COMPONENT_3_SHIFT));
      else
         OUT_BATCH((comp0 << BRW_VE1_COMPONENT_0_SHIFT) |
                   (comp1 << BRW_VE1_COMPONENT_1_SHIFT) |
                   (comp2 << BRW_VE1_COMPONENT_2_SHIFT) |
                   (comp3 << BRW_VE1_COMPONENT_3_SHIFT) |
                   ((i * 4) << BRW_VE1_DST_OFFSET_SHIFT));
   }
   CACHED_BATCH();
}
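
/* Illustrative note on the deliberate switch fall-through above (added
 * commentary, not driver code): for a two-component array, execution
 * enters at case 2 and falls through case 3, so comp2 becomes
 * BRW_VE1_COMPONENT_STORE_0 and comp3 becomes
 * BRW_VE1_COMPONENT_STORE_1_FLT; the VS then sees the attribute padded
 * out to (x, y, 0, 1.0f).
 */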
const struct brw_tracked_state brw_vertices = {
   .dirty = {
      .mesa = 0,
      .brw = BRW_NEW_BATCH | BRW_NEW_VERTICES,
      .cache = CACHE_NEW_VS_PROG,
   },
   .prepare = brw_prepare_vertices,
   .emit = brw_emit_vertices,
};
static void brw_prepare_indices(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->intel.ctx;
   struct intel_context *intel = &brw->intel;
   const struct _mesa_index_buffer *index_buffer = brw->ib.ib;
   GLuint ib_size;
   drm_intel_bo *bo = NULL;
   struct gl_buffer_object *bufferobj;
   GLuint offset;
   GLuint ib_type_size;

   if (index_buffer == NULL)
      return;

   ib_type_size = get_size(index_buffer->type);
   ib_size = ib_type_size * index_buffer->count;
   bufferobj = index_buffer->obj;

   /* Turn into a proper VBO:
    */
   if (!_mesa_is_bufferobj(bufferobj)) {

      /* Get new bufferobj, offset:
       */
      intel_upload_data(&brw->intel, index_buffer->ptr, ib_size, ib_type_size,
                        &bo, &offset);
      brw->ib.start_vertex_offset = offset / ib_type_size;
   } else {
      offset = (GLuint) (unsigned long) index_buffer->ptr;

      /* If the index buffer isn't aligned to its element size, we have to
       * rebase it into a temporary.
       */
      if ((get_size(index_buffer->type) - 1) & offset) {
         GLubyte *map = ctx->Driver.MapBuffer(ctx,
                                              GL_ELEMENT_ARRAY_BUFFER_ARB,
                                              GL_DYNAMIC_DRAW_ARB,
                                              bufferobj);
         map += offset;

         intel_upload_data(&brw->intel, map, ib_size, ib_type_size,
                           &bo, &offset);
         brw->ib.start_vertex_offset = offset / ib_type_size;

         ctx->Driver.UnmapBuffer(ctx, GL_ELEMENT_ARRAY_BUFFER_ARB, bufferobj);
      } else {
         /* Use CMD_3D_PRIM's start_vertex_offset to avoid re-uploading
          * the index buffer state when we're just moving the start index
          * of our drawing.
          */
         brw->ib.start_vertex_offset = offset / ib_type_size;

         bo = intel_bufferobj_source(intel,
                                     intel_buffer_object(bufferobj),
                                     ib_type_size,
                                     &offset);
         drm_intel_bo_reference(bo);

         brw->ib.start_vertex_offset += offset / ib_type_size;
      }
   }

   if (brw->ib.bo != bo) {
      drm_intel_bo_unreference(brw->ib.bo);
      brw->ib.bo = bo;

      brw_add_validated_bo(brw, brw->ib.bo);
      brw->state.dirty.brw |= BRW_NEW_INDEX_BUFFER;
   } else {
      drm_intel_bo_unreference(bo);
   }

   if (index_buffer->type != brw->ib.type) {
      brw->ib.type = index_buffer->type;
      brw->state.dirty.brw |= BRW_NEW_INDEX_BUFFER;
   }
}
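
/* Illustrative example of the alignment rebase above (added commentary,
 * not driver code): a GL_UNSIGNED_SHORT index buffer bound at byte
 * offset 1 fails the (get_size(type) - 1) & offset test, since
 * (2 - 1) & 1 == 1, so the indices are copied through
 * intel_upload_data() into an aligned temporary rather than referenced
 * in place.
 */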
const struct brw_tracked_state brw_indices = {
   .dirty = {
      .mesa = 0,
      .brw = BRW_NEW_INDICES,
      .cache = 0,
   },
   .prepare = brw_prepare_indices,
};
static void brw_emit_index_buffer(struct brw_context *brw)
{
   struct intel_context *intel = &brw->intel;
   const struct _mesa_index_buffer *index_buffer = brw->ib.ib;

   if (index_buffer == NULL)
      return;

   BEGIN_BATCH(3);
   OUT_BATCH(CMD_INDEX_BUFFER << 16 |
             /* cut index enable << 10 */
             get_index_type(index_buffer->type) << 8 |
             1);
   OUT_RELOC(brw->ib.bo,
             I915_GEM_DOMAIN_VERTEX, 0,
             0);
   OUT_RELOC(brw->ib.bo,
             I915_GEM_DOMAIN_VERTEX, 0,
             brw->ib.bo->size - 1);
   ADVANCE_BATCH();
}
const struct brw_tracked_state brw_index_buffer = {
   .dirty = {
      .mesa = 0,
      .brw = BRW_NEW_BATCH | BRW_NEW_INDEX_BUFFER,
      .cache = 0,
   },
   .emit = brw_emit_index_buffer,
};