/**************************************************************************
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include "main/glheader.h"
#include "main/bufferobj.h"
#include "main/context.h"
#include "main/enums.h"

#include "brw_defines.h"
#include "brw_context.h"
#include "brw_state.h"

#include "intel_batchbuffer.h"
#include "intel_buffer_objects.h"
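
/* Surface format lookup tables for vertex arrays, indexed by component
 * count (1..4); entry 0 is an unused placeholder so that glarray->Size can
 * be used as a direct index.
 */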
static GLuint double_types[5] = {
   0,
   BRW_SURFACEFORMAT_R64_FLOAT,
   BRW_SURFACEFORMAT_R64G64_FLOAT,
   BRW_SURFACEFORMAT_R64G64B64_FLOAT,
   BRW_SURFACEFORMAT_R64G64B64A64_FLOAT
};

static GLuint float_types[5] = {
   0,
   BRW_SURFACEFORMAT_R32_FLOAT,
   BRW_SURFACEFORMAT_R32G32_FLOAT,
   BRW_SURFACEFORMAT_R32G32B32_FLOAT,
   BRW_SURFACEFORMAT_R32G32B32A32_FLOAT
};

static GLuint half_float_types[5] = {
   0,
   BRW_SURFACEFORMAT_R16_FLOAT,
   BRW_SURFACEFORMAT_R16G16_FLOAT,
   BRW_SURFACEFORMAT_R16G16B16A16_FLOAT, /* 3-component case uses the 4-component format */
   BRW_SURFACEFORMAT_R16G16B16A16_FLOAT
};

static GLuint uint_types_norm[5] = {
   0,
   BRW_SURFACEFORMAT_R32_UNORM,
   BRW_SURFACEFORMAT_R32G32_UNORM,
   BRW_SURFACEFORMAT_R32G32B32_UNORM,
   BRW_SURFACEFORMAT_R32G32B32A32_UNORM
};

static GLuint uint_types_scale[5] = {
   0,
   BRW_SURFACEFORMAT_R32_USCALED,
   BRW_SURFACEFORMAT_R32G32_USCALED,
   BRW_SURFACEFORMAT_R32G32B32_USCALED,
   BRW_SURFACEFORMAT_R32G32B32A32_USCALED
};

static GLuint int_types_norm[5] = {
   0,
   BRW_SURFACEFORMAT_R32_SNORM,
   BRW_SURFACEFORMAT_R32G32_SNORM,
   BRW_SURFACEFORMAT_R32G32B32_SNORM,
   BRW_SURFACEFORMAT_R32G32B32A32_SNORM
};

static GLuint int_types_scale[5] = {
   0,
   BRW_SURFACEFORMAT_R32_SSCALED,
   BRW_SURFACEFORMAT_R32G32_SSCALED,
   BRW_SURFACEFORMAT_R32G32B32_SSCALED,
   BRW_SURFACEFORMAT_R32G32B32A32_SSCALED
};

static GLuint ushort_types_norm[5] = {
   0,
   BRW_SURFACEFORMAT_R16_UNORM,
   BRW_SURFACEFORMAT_R16G16_UNORM,
   BRW_SURFACEFORMAT_R16G16B16_UNORM,
   BRW_SURFACEFORMAT_R16G16B16A16_UNORM
};

static GLuint ushort_types_scale[5] = {
   0,
   BRW_SURFACEFORMAT_R16_USCALED,
   BRW_SURFACEFORMAT_R16G16_USCALED,
   BRW_SURFACEFORMAT_R16G16B16_USCALED,
   BRW_SURFACEFORMAT_R16G16B16A16_USCALED
};

static GLuint short_types_norm[5] = {
   0,
   BRW_SURFACEFORMAT_R16_SNORM,
   BRW_SURFACEFORMAT_R16G16_SNORM,
   BRW_SURFACEFORMAT_R16G16B16_SNORM,
   BRW_SURFACEFORMAT_R16G16B16A16_SNORM
};

static GLuint short_types_scale[5] = {
   0,
   BRW_SURFACEFORMAT_R16_SSCALED,
   BRW_SURFACEFORMAT_R16G16_SSCALED,
   BRW_SURFACEFORMAT_R16G16B16_SSCALED,
   BRW_SURFACEFORMAT_R16G16B16A16_SSCALED
};

static GLuint ubyte_types_norm[5] = {
   0,
   BRW_SURFACEFORMAT_R8_UNORM,
   BRW_SURFACEFORMAT_R8G8_UNORM,
   BRW_SURFACEFORMAT_R8G8B8_UNORM,
   BRW_SURFACEFORMAT_R8G8B8A8_UNORM
};

static GLuint ubyte_types_scale[5] = {
   0,
   BRW_SURFACEFORMAT_R8_USCALED,
   BRW_SURFACEFORMAT_R8G8_USCALED,
   BRW_SURFACEFORMAT_R8G8B8_USCALED,
   BRW_SURFACEFORMAT_R8G8B8A8_USCALED
};

static GLuint byte_types_norm[5] = {
   0,
   BRW_SURFACEFORMAT_R8_SNORM,
   BRW_SURFACEFORMAT_R8G8_SNORM,
   BRW_SURFACEFORMAT_R8G8B8_SNORM,
   BRW_SURFACEFORMAT_R8G8B8A8_SNORM
};

static GLuint byte_types_scale[5] = {
   0,
   BRW_SURFACEFORMAT_R8_SSCALED,
   BRW_SURFACEFORMAT_R8G8_SSCALED,
   BRW_SURFACEFORMAT_R8G8B8_SSCALED,
   BRW_SURFACEFORMAT_R8G8B8A8_SSCALED
};
/**
 * Given vertex array type/size/format/normalized info, return
 * the appropriate hardware surface type.
 * Format will be GL_RGBA or possibly GL_BGRA for GLubyte[4] color arrays.
 */
static GLuint get_surface_type( GLenum type, GLuint size,
                                GLenum format, GLboolean normalized )
{
   if (unlikely(INTEL_DEBUG & DEBUG_VERTS))
      printf("type %s size %d normalized %d\n",
             _mesa_lookup_enum_by_nr(type), size, normalized);

   if (normalized) {
      switch (type) {
      case GL_DOUBLE: return double_types[size];
      case GL_FLOAT: return float_types[size];
      case GL_HALF_FLOAT: return half_float_types[size];
      case GL_INT: return int_types_norm[size];
      case GL_SHORT: return short_types_norm[size];
      case GL_BYTE: return byte_types_norm[size];
      case GL_UNSIGNED_INT: return uint_types_norm[size];
      case GL_UNSIGNED_SHORT: return ushort_types_norm[size];
      case GL_UNSIGNED_BYTE:
         if (format == GL_BGRA) {
            /* See GL_EXT_vertex_array_bgra */
            return BRW_SURFACEFORMAT_B8G8R8A8_UNORM;
         }
         else {
            return ubyte_types_norm[size];
         }
      default: assert(0); return 0;
      }
   }
   else {
      assert(format == GL_RGBA); /* sanity check */
      switch (type) {
      case GL_DOUBLE: return double_types[size];
      case GL_FLOAT: return float_types[size];
      case GL_HALF_FLOAT: return half_float_types[size];
      case GL_INT: return int_types_scale[size];
      case GL_SHORT: return short_types_scale[size];
      case GL_BYTE: return byte_types_scale[size];
      case GL_UNSIGNED_INT: return uint_types_scale[size];
      case GL_UNSIGNED_SHORT: return ushort_types_scale[size];
      case GL_UNSIGNED_BYTE: return ubyte_types_scale[size];
      default: assert(0); return 0;
      }
   }
}
static GLuint get_size( GLenum type )
{
   switch (type) {
   case GL_DOUBLE: return sizeof(GLdouble);
   case GL_FLOAT: return sizeof(GLfloat);
   case GL_HALF_FLOAT: return sizeof(GLhalfARB);
   case GL_INT: return sizeof(GLint);
   case GL_SHORT: return sizeof(GLshort);
   case GL_BYTE: return sizeof(GLbyte);
   case GL_UNSIGNED_INT: return sizeof(GLuint);
   case GL_UNSIGNED_SHORT: return sizeof(GLushort);
   case GL_UNSIGNED_BYTE: return sizeof(GLubyte);
   default: assert(0); return 0;
   }
}
static GLuint get_index_type(GLenum type)
{
   switch (type) {
   case GL_UNSIGNED_BYTE: return BRW_INDEX_BYTE;
   case GL_UNSIGNED_SHORT: return BRW_INDEX_WORD;
   case GL_UNSIGNED_INT: return BRW_INDEX_DWORD;
   default: assert(0); return 0;
   }
}
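
/* Copy a client vertex array into the streaming upload buffer.  When the
 * source stride already matches dst_stride the data is uploaded in one
 * shot; otherwise each element is repacked to dst_stride as it is copied.
 * On return, element->bo and element->offset describe the uploaded copy.
 */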
static void
copy_array_to_vbo_array(struct brw_context *brw,
                        struct brw_vertex_element *element,
                        GLuint dst_stride)
{
   GLuint size = element->count * dst_stride;

   if (element->glarray->StrideB == 0) {
      assert(element->count == 1);
      element->stride = 0;
   } else {
      element->stride = dst_stride;
   }

   if (dst_stride == element->glarray->StrideB) {
      intel_upload_data(&brw->intel, element->glarray->Ptr, size,
                        &element->bo, &element->offset);
   } else {
      const unsigned char *src = element->glarray->Ptr;
      char *dst = intel_upload_map(&brw->intel, size,
                                   &element->bo, &element->offset);
      GLuint i;

      for (i = 0; i < element->count; i++) {
         memcpy(dst, src, dst_stride);
         src += element->glarray->StrideB;
         dst += dst_stride;
      }
   }
}
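
/* Gather the enabled vertex arrays: VBO-backed arrays are referenced in
 * place, while user-space arrays are queued for upload.  If the queued
 * arrays look interleaved (shared stride, pointers within one stride of
 * the position array), they are uploaded together as one buffer.
 */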
static void brw_prepare_vertices(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->intel.ctx;
   struct intel_context *intel = intel_context(ctx);
   GLbitfield vs_inputs = brw->vs.prog_data->inputs_read;
   GLuint i;
   const unsigned char *ptr = NULL;
   GLuint interleave = 0;
   unsigned int min_index = brw->vb.min_index;
   unsigned int max_index = brw->vb.max_index;

   struct brw_vertex_element *upload[VERT_ATTRIB_MAX];
   GLuint nr_uploads = 0;

   /* First build an array of pointers to ve's in vb.inputs_read
    */
   if (0)
      printf("%s %d..%d\n", __FUNCTION__, min_index, max_index);

   /* Accumulate the list of enabled arrays. */
   brw->vb.nr_enabled = 0;
   while (vs_inputs) {
      GLuint i = _mesa_ffsll(vs_inputs) - 1;
      struct brw_vertex_element *input = &brw->vb.inputs[i];

      vs_inputs &= ~(1 << i);
      brw->vb.enabled[brw->vb.nr_enabled++] = input;
   }

   if (brw->vb.nr_enabled == 0)
      return;

   /* XXX: In the rare cases where this happens we fallback all
    * the way to software rasterization, although a tnl fallback
    * would be sufficient.  I don't know of *any* real world
    * cases with > 17 vertex attributes enabled, so it probably
    * isn't an issue at this point.
    */
   if (brw->vb.nr_enabled >= BRW_VEP_MAX) {
      intel->Fallback = GL_TRUE; /* boolean, not bitfield */
      return;
   }

   for (i = 0; i < brw->vb.nr_enabled; i++) {
      struct brw_vertex_element *input = brw->vb.enabled[i];

      input->element_size = get_size(input->glarray->Type) * input->glarray->Size;

      if (_mesa_is_bufferobj(input->glarray->BufferObj)) {
         struct intel_buffer_object *intel_buffer =
            intel_buffer_object(input->glarray->BufferObj);
         GLuint offset;

         /* Named buffer object: Just reference its contents directly. */
         drm_intel_bo_unreference(input->bo);
         input->bo = intel_bufferobj_source(intel, intel_buffer, &offset);
         drm_intel_bo_reference(input->bo);
         input->offset = offset + (unsigned long)input->glarray->Ptr;
         input->stride = input->glarray->StrideB;
         input->count = input->glarray->_MaxElement;

         /* This is a common place to reach if the user mistakenly supplies
          * a pointer in place of a VBO offset.  If we just let it go through,
          * we may end up dereferencing a pointer beyond the bounds of the
          * GTT.  We would hope that the VBO's max_index would save us, but
          * Mesa appears to hand us min/max values not clipped to the
          * array object's _MaxElement, and _MaxElement frequently appears
          * to be wrong anyway.
          *
          * The VBO spec allows application termination in this case, and it's
          * probably a service to the poor programmer to do so rather than
          * trying to just not render.
          */
         assert(input->offset < input->bo->size);
      } else {
         input->count = input->glarray->StrideB ? max_index + 1 : 1;
         if (input->bo != NULL) {
            /* Already-uploaded vertex data is present from a previous
             * prepare_vertices, but we had to re-validate state due to
             * check_aperture failing and a new batch being produced.
             */
            continue;
         }

         /* Queue the buffer object up to be uploaded in the next pass,
          * when we've decided if we're doing interleaved or not.
          */
         if (input->attrib == VERT_ATTRIB_POS) {
            /* Position array not properly enabled:
             */
            if (input->glarray->StrideB == 0) {
               intel->Fallback = GL_TRUE; /* boolean, not bitfield */
               return;
            }

            interleave = input->glarray->StrideB;
            ptr = input->glarray->Ptr;
         }
         else if (interleave != input->glarray->StrideB ||
                  (const unsigned char *)input->glarray->Ptr - ptr < 0 ||
                  (const unsigned char *)input->glarray->Ptr - ptr > interleave)
         {
            interleave = 0;
         }

         upload[nr_uploads++] = input;
      }
   }

   /* Handle any arrays to be uploaded. */
   if (nr_uploads > 1 && interleave && interleave <= 256) {
      /* All uploads are interleaved, so upload the arrays together as
       * interleaved.  First, upload the contents and set up upload[0].
       */
      copy_array_to_vbo_array(brw, upload[0], interleave);

      for (i = 1; i < nr_uploads; i++) {
         /* Then, just point upload[i] at upload[0]'s buffer. */
         upload[i]->stride = interleave;
         upload[i]->offset = upload[0]->offset +
            ((const unsigned char *)upload[i]->glarray->Ptr - ptr);
         upload[i]->bo = upload[0]->bo;
         drm_intel_bo_reference(upload[i]->bo);
      }
   }
   else {
      /* Upload non-interleaved arrays */
      for (i = 0; i < nr_uploads; i++) {
         copy_array_to_vbo_array(brw, upload[i], upload[i]->element_size);
      }
   }

   brw_prepare_query_begin(brw);

   for (i = 0; i < brw->vb.nr_enabled; i++) {
      struct brw_vertex_element *input = brw->vb.enabled[i];

      brw_add_validated_bo(brw, input->bo);
   }
}
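
/* Emit the vertex buffer and vertex element state packets for the enabled
 * arrays.  A separate hardware vertex buffer is still defined for every
 * input, even when several inputs share a buffer object.
 */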
static void brw_emit_vertices(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->intel.ctx;
   struct intel_context *intel = intel_context(ctx);
   GLuint i;

   brw_emit_query_begin(brw);

   /* If the VS doesn't read any inputs (calculating vertex position from
    * a state variable for some reason, for example), emit a single pad
    * VERTEX_ELEMENT struct and bail.
    *
    * The stale VB state stays in place, but they don't do anything unless
    * a VE loads from them.
    */
   if (brw->vb.nr_enabled == 0) {
      BEGIN_BATCH(3);
      OUT_BATCH((CMD_VERTEX_ELEMENT << 16) | 1);
      if (intel->gen >= 6) {
         OUT_BATCH((0 << GEN6_VE0_INDEX_SHIFT) |
                   GEN6_VE0_VALID |
                   (BRW_SURFACEFORMAT_R32G32B32A32_FLOAT << BRW_VE0_FORMAT_SHIFT) |
                   (0 << BRW_VE0_SRC_OFFSET_SHIFT));
      } else {
         OUT_BATCH((0 << BRW_VE0_INDEX_SHIFT) |
                   BRW_VE0_VALID |
                   (BRW_SURFACEFORMAT_R32G32B32A32_FLOAT << BRW_VE0_FORMAT_SHIFT) |
                   (0 << BRW_VE0_SRC_OFFSET_SHIFT));
      }
      OUT_BATCH((BRW_VE1_COMPONENT_STORE_0 << BRW_VE1_COMPONENT_0_SHIFT) |
                (BRW_VE1_COMPONENT_STORE_0 << BRW_VE1_COMPONENT_1_SHIFT) |
                (BRW_VE1_COMPONENT_STORE_0 << BRW_VE1_COMPONENT_2_SHIFT) |
                (BRW_VE1_COMPONENT_STORE_1_FLT << BRW_VE1_COMPONENT_3_SHIFT));
      ADVANCE_BATCH();
      return;
   }

   /* Now emit VB and VEP state packets.
    *
    * This still defines a hardware VB for each input, even if they
    * are interleaved or from the same VBO.  TBD if this makes a
    * performance difference.
    */
   BEGIN_BATCH(1 + brw->vb.nr_enabled * 4);
   OUT_BATCH((CMD_VERTEX_BUFFER << 16) |
             ((1 + brw->vb.nr_enabled * 4) - 2));
   for (i = 0; i < brw->vb.nr_enabled; i++) {
      struct brw_vertex_element *input = brw->vb.enabled[i];
      uint32_t dw0;

      if (intel->gen >= 6) {
         dw0 = GEN6_VB0_ACCESS_VERTEXDATA |
               (i << GEN6_VB0_INDEX_SHIFT);
      } else {
         dw0 = BRW_VB0_ACCESS_VERTEXDATA |
               (i << BRW_VB0_INDEX_SHIFT);
      }

      OUT_BATCH(dw0 |
                (input->stride << BRW_VB0_PITCH_SHIFT));
      OUT_RELOC(input->bo,
                I915_GEM_DOMAIN_VERTEX, 0,
                input->offset);
      if (intel->gen >= 5) {
         OUT_RELOC(input->bo,
                   I915_GEM_DOMAIN_VERTEX, 0,
                   input->bo->size - 1);
      } else
         OUT_BATCH(input->stride ? input->count : 0);
      OUT_BATCH(0); /* Instance data step rate */
   }
   ADVANCE_BATCH();

   BEGIN_BATCH(1 + brw->vb.nr_enabled * 2);
   OUT_BATCH((CMD_VERTEX_ELEMENT << 16) | ((1 + brw->vb.nr_enabled * 2) - 2));
   for (i = 0; i < brw->vb.nr_enabled; i++) {
      struct brw_vertex_element *input = brw->vb.enabled[i];
      uint32_t format = get_surface_type(input->glarray->Type,
                                         input->glarray->Size,
                                         input->glarray->Format,
                                         input->glarray->Normalized);
      uint32_t comp0 = BRW_VE1_COMPONENT_STORE_SRC;
      uint32_t comp1 = BRW_VE1_COMPONENT_STORE_SRC;
      uint32_t comp2 = BRW_VE1_COMPONENT_STORE_SRC;
      uint32_t comp3 = BRW_VE1_COMPONENT_STORE_SRC;

      /* Intentional fall-through: pad the missing components of small
       * arrays with 0, 0, 0, 1.
       */
      switch (input->glarray->Size) {
      case 0: comp0 = BRW_VE1_COMPONENT_STORE_0;
      case 1: comp1 = BRW_VE1_COMPONENT_STORE_0;
      case 2: comp2 = BRW_VE1_COMPONENT_STORE_0;
      case 3: comp3 = BRW_VE1_COMPONENT_STORE_1_FLT;
         break;
      }

      if (intel->gen >= 6) {
         OUT_BATCH((i << GEN6_VE0_INDEX_SHIFT) |
                   GEN6_VE0_VALID |
                   (format << BRW_VE0_FORMAT_SHIFT) |
                   (0 << BRW_VE0_SRC_OFFSET_SHIFT));
      } else {
         OUT_BATCH((i << BRW_VE0_INDEX_SHIFT) |
                   BRW_VE0_VALID |
                   (format << BRW_VE0_FORMAT_SHIFT) |
                   (0 << BRW_VE0_SRC_OFFSET_SHIFT));
      }

      if (intel->gen >= 5)
         OUT_BATCH((comp0 << BRW_VE1_COMPONENT_0_SHIFT) |
                   (comp1 << BRW_VE1_COMPONENT_1_SHIFT) |
                   (comp2 << BRW_VE1_COMPONENT_2_SHIFT) |
                   (comp3 << BRW_VE1_COMPONENT_3_SHIFT));
      else
         OUT_BATCH((comp0 << BRW_VE1_COMPONENT_0_SHIFT) |
                   (comp1 << BRW_VE1_COMPONENT_1_SHIFT) |
                   (comp2 << BRW_VE1_COMPONENT_2_SHIFT) |
                   (comp3 << BRW_VE1_COMPONENT_3_SHIFT) |
                   ((i * 4) << BRW_VE1_DST_OFFSET_SHIFT));
   }
   ADVANCE_BATCH();
}
const struct brw_tracked_state brw_vertices = {
   .dirty = {
      .mesa = 0,
      .brw = BRW_NEW_BATCH | BRW_NEW_VERTICES,
      .cache = 0,
   },
   .prepare = brw_prepare_vertices,
   .emit = brw_emit_vertices,
};
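
/* Set up brw->ib for the current index buffer.  User-space index arrays are
 * uploaded into a temporary buffer object; unaligned offsets into a VBO are
 * rebased through a map-and-upload; aligned VBO indices are referenced in
 * place, with start_vertex_offset carrying the element offset.
 */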
static void brw_prepare_indices(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->intel.ctx;
   struct intel_context *intel = &brw->intel;
   const struct _mesa_index_buffer *index_buffer = brw->ib.ib;
   GLuint ib_size;
   drm_intel_bo *bo = NULL;
   struct gl_buffer_object *bufferobj;
   GLuint offset;
   GLuint ib_type_size;

   if (index_buffer == NULL)
      return;

   ib_type_size = get_size(index_buffer->type);
   ib_size = ib_type_size * index_buffer->count;
   bufferobj = index_buffer->obj;

   /* Turn into a proper VBO:
    */
   if (!_mesa_is_bufferobj(bufferobj)) {
      brw->ib.start_vertex_offset = 0;

      /* Get new bufferobj, offset:
       */
      intel_upload_data(&brw->intel, index_buffer->ptr, ib_size, &bo, &offset);
   } else {
      offset = (GLuint) (unsigned long) index_buffer->ptr;
      brw->ib.start_vertex_offset = 0;

      /* If the index buffer isn't aligned to its element size, we have to
       * rebase it into a temporary.
       */
      if ((get_size(index_buffer->type) - 1) & offset) {
         GLubyte *map = ctx->Driver.MapBuffer(ctx,
                                              GL_ELEMENT_ARRAY_BUFFER_ARB,
                                              GL_DYNAMIC_DRAW_ARB,
                                              bufferobj);
         map += offset;

         intel_upload_data(&brw->intel, map, ib_size, &bo, &offset);

         ctx->Driver.UnmapBuffer(ctx, GL_ELEMENT_ARRAY_BUFFER_ARB, bufferobj);
      } else {
         /* Use CMD_3D_PRIM's start_vertex_offset to avoid re-uploading
          * the index buffer state when we're just moving the start index
          * of our drawing.
          */
         brw->ib.start_vertex_offset = offset / ib_type_size;

         bo = intel_bufferobj_source(intel, intel_buffer_object(bufferobj),
                                     &offset);
         drm_intel_bo_reference(bo);
      }
   }

   if (brw->ib.bo != bo ||
       brw->ib.offset != offset ||
       brw->ib.size != ib_size)
   {
      drm_intel_bo_unreference(brw->ib.bo);
      brw->ib.bo = bo;
      brw->ib.offset = offset;
      brw->ib.size = ib_size;

      brw->state.dirty.brw |= BRW_NEW_INDEX_BUFFER;
   } else {
      drm_intel_bo_unreference(bo);
   }

   brw_add_validated_bo(brw, brw->ib.bo);
}
const struct brw_tracked_state brw_indices = {
   .dirty = {
      .mesa = 0,
      .brw = BRW_NEW_INDICES,
      .cache = 0,
   },
   .prepare = brw_prepare_indices,
};
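
/* Emit the CMD_INDEX_BUFFER packet pointing at brw->ib, with relocations
 * for the start and end addresses of the buffer range.
 */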
static void brw_emit_index_buffer(struct brw_context *brw)
{
   struct intel_context *intel = &brw->intel;
   const struct _mesa_index_buffer *index_buffer = brw->ib.ib;

   if (index_buffer == NULL)
      return;

   BEGIN_BATCH(3);
   OUT_BATCH(CMD_INDEX_BUFFER << 16 |
             /* cut index enable << 10 */
             get_index_type(index_buffer->type) << 8 |
             1);
   OUT_RELOC(brw->ib.bo,
             I915_GEM_DOMAIN_VERTEX, 0,
             brw->ib.offset);
   OUT_RELOC(brw->ib.bo,
             I915_GEM_DOMAIN_VERTEX, 0,
             brw->ib.offset + brw->ib.size - 1);
   ADVANCE_BATCH();
}
const struct brw_tracked_state brw_index_buffer = {
   .dirty = {
      .mesa = 0,
      .brw = BRW_NEW_BATCH | BRW_NEW_INDEX_BUFFER,
      .cache = 0,
   },
   .emit = brw_emit_index_buffer,
};