/**************************************************************************
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include "main/glheader.h"
#include "main/bufferobj.h"
#include "main/context.h"
#include "main/state.h"
#include "main/api_validate.h"
#include "main/enums.h"

#include "brw_draw.h"
#include "brw_defines.h"
#include "brw_context.h"
#include "brw_state.h"
#include "brw_fallback.h"

#include "intel_batchbuffer.h"
#include "intel_buffer_objects.h"
#include "intel_tex.h"
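
/* Tables mapping a vertex array's component count (1..4) to the hardware
 * surface format used to fetch it.  Entry 0 is unused padding so the
 * tables can be indexed directly by the array's Size.  The *_norm tables
 * are for normalized arrays (integers mapped into [0,1] or [-1,1]); the
 * *_scale tables are for non-normalized arrays (integers converted to
 * their float equivalents).
 */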
static GLuint double_types[5] = {
   0,
   BRW_SURFACEFORMAT_R64_FLOAT,
   BRW_SURFACEFORMAT_R64G64_FLOAT,
   BRW_SURFACEFORMAT_R64G64B64_FLOAT,
   BRW_SURFACEFORMAT_R64G64B64A64_FLOAT
};

static GLuint float_types[5] = {
   0,
   BRW_SURFACEFORMAT_R32_FLOAT,
   BRW_SURFACEFORMAT_R32G32_FLOAT,
   BRW_SURFACEFORMAT_R32G32B32_FLOAT,
   BRW_SURFACEFORMAT_R32G32B32A32_FLOAT
};

static GLuint uint_types_norm[5] = {
   0,
   BRW_SURFACEFORMAT_R32_UNORM,
   BRW_SURFACEFORMAT_R32G32_UNORM,
   BRW_SURFACEFORMAT_R32G32B32_UNORM,
   BRW_SURFACEFORMAT_R32G32B32A32_UNORM
};

static GLuint uint_types_scale[5] = {
   0,
   BRW_SURFACEFORMAT_R32_USCALED,
   BRW_SURFACEFORMAT_R32G32_USCALED,
   BRW_SURFACEFORMAT_R32G32B32_USCALED,
   BRW_SURFACEFORMAT_R32G32B32A32_USCALED
};

static GLuint int_types_norm[5] = {
   0,
   BRW_SURFACEFORMAT_R32_SNORM,
   BRW_SURFACEFORMAT_R32G32_SNORM,
   BRW_SURFACEFORMAT_R32G32B32_SNORM,
   BRW_SURFACEFORMAT_R32G32B32A32_SNORM
};

static GLuint int_types_scale[5] = {
   0,
   BRW_SURFACEFORMAT_R32_SSCALED,
   BRW_SURFACEFORMAT_R32G32_SSCALED,
   BRW_SURFACEFORMAT_R32G32B32_SSCALED,
   BRW_SURFACEFORMAT_R32G32B32A32_SSCALED
};

static GLuint ushort_types_norm[5] = {
   0,
   BRW_SURFACEFORMAT_R16_UNORM,
   BRW_SURFACEFORMAT_R16G16_UNORM,
   BRW_SURFACEFORMAT_R16G16B16_UNORM,
   BRW_SURFACEFORMAT_R16G16B16A16_UNORM
};

static GLuint ushort_types_scale[5] = {
   0,
   BRW_SURFACEFORMAT_R16_USCALED,
   BRW_SURFACEFORMAT_R16G16_USCALED,
   BRW_SURFACEFORMAT_R16G16B16_USCALED,
   BRW_SURFACEFORMAT_R16G16B16A16_USCALED
};

static GLuint short_types_norm[5] = {
   0,
   BRW_SURFACEFORMAT_R16_SNORM,
   BRW_SURFACEFORMAT_R16G16_SNORM,
   BRW_SURFACEFORMAT_R16G16B16_SNORM,
   BRW_SURFACEFORMAT_R16G16B16A16_SNORM
};

static GLuint short_types_scale[5] = {
   0,
   BRW_SURFACEFORMAT_R16_SSCALED,
   BRW_SURFACEFORMAT_R16G16_SSCALED,
   BRW_SURFACEFORMAT_R16G16B16_SSCALED,
   BRW_SURFACEFORMAT_R16G16B16A16_SSCALED
};

static GLuint ubyte_types_norm[5] = {
   0,
   BRW_SURFACEFORMAT_R8_UNORM,
   BRW_SURFACEFORMAT_R8G8_UNORM,
   BRW_SURFACEFORMAT_R8G8B8_UNORM,
   BRW_SURFACEFORMAT_R8G8B8A8_UNORM
};

static GLuint ubyte_types_scale[5] = {
   0,
   BRW_SURFACEFORMAT_R8_USCALED,
   BRW_SURFACEFORMAT_R8G8_USCALED,
   BRW_SURFACEFORMAT_R8G8B8_USCALED,
   BRW_SURFACEFORMAT_R8G8B8A8_USCALED
};

static GLuint byte_types_norm[5] = {
   0,
   BRW_SURFACEFORMAT_R8_SNORM,
   BRW_SURFACEFORMAT_R8G8_SNORM,
   BRW_SURFACEFORMAT_R8G8B8_SNORM,
   BRW_SURFACEFORMAT_R8G8B8A8_SNORM
};

static GLuint byte_types_scale[5] = {
   0,
   BRW_SURFACEFORMAT_R8_SSCALED,
   BRW_SURFACEFORMAT_R8G8_SSCALED,
   BRW_SURFACEFORMAT_R8G8B8_SSCALED,
   BRW_SURFACEFORMAT_R8G8B8A8_SSCALED
};
/**
 * Given vertex array type/size/format/normalized info, return
 * the appropriate hardware surface type.
 * Format will be GL_RGBA or possibly GL_BGRA for GLubyte[4] color arrays.
 */
static GLuint get_surface_type( GLenum type, GLuint size,
                                GLenum format, GLboolean normalized )
{
   if (INTEL_DEBUG & DEBUG_VERTS)
      _mesa_printf("type %s size %d normalized %d\n",
                   _mesa_lookup_enum_by_nr(type), size, normalized);

   if (normalized) {
      switch (type) {
      case GL_DOUBLE: return double_types[size];
      case GL_FLOAT: return float_types[size];
      case GL_INT: return int_types_norm[size];
      case GL_SHORT: return short_types_norm[size];
      case GL_BYTE: return byte_types_norm[size];
      case GL_UNSIGNED_INT: return uint_types_norm[size];
      case GL_UNSIGNED_SHORT: return ushort_types_norm[size];
      case GL_UNSIGNED_BYTE:
         if (format == GL_BGRA) {
            /* See GL_EXT_vertex_array_bgra */
            assert(size == 4);
            return BRW_SURFACEFORMAT_B8G8R8A8_UNORM;
         }
         else {
            return ubyte_types_norm[size];
         }
      default: assert(0); return 0;
      }
   }
   else {
      assert(format == GL_RGBA); /* sanity check */
      switch (type) {
      case GL_DOUBLE: return double_types[size];
      case GL_FLOAT: return float_types[size];
      case GL_INT: return int_types_scale[size];
      case GL_SHORT: return short_types_scale[size];
      case GL_BYTE: return byte_types_scale[size];
      case GL_UNSIGNED_INT: return uint_types_scale[size];
      case GL_UNSIGNED_SHORT: return ushort_types_scale[size];
      case GL_UNSIGNED_BYTE: return ubyte_types_scale[size];
      default: assert(0); return 0;
      }
   }
}

static GLuint get_size( GLenum type )
{
   switch (type) {
   case GL_DOUBLE: return sizeof(GLdouble);
   case GL_FLOAT: return sizeof(GLfloat);
   case GL_INT: return sizeof(GLint);
   case GL_SHORT: return sizeof(GLshort);
   case GL_BYTE: return sizeof(GLbyte);
   case GL_UNSIGNED_INT: return sizeof(GLuint);
   case GL_UNSIGNED_SHORT: return sizeof(GLushort);
   case GL_UNSIGNED_BYTE: return sizeof(GLubyte);
   default: assert(0); return 0;
   }
}

static GLuint get_index_type(GLenum type)
{
   switch (type) {
   case GL_UNSIGNED_BYTE:  return BRW_INDEX_BYTE;
   case GL_UNSIGNED_SHORT: return BRW_INDEX_WORD;
   case GL_UNSIGNED_INT:   return BRW_INDEX_DWORD;
   default: assert(0); return 0;
   }
}
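
/* The driver keeps a single streaming "upload" buffer object that
 * temporary vertex and index data is sub-allocated from.  When the
 * current buffer fills up, wrap_buffers() drops our reference to it
 * (the kernel keeps it alive until the GPU is done with it) and
 * starts a fresh one.
 */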
static void wrap_buffers( struct brw_context *brw,
                          GLuint size )
{
   if (size < BRW_UPLOAD_INIT_SIZE)
      size = BRW_UPLOAD_INIT_SIZE;

   brw->vb.upload.offset = 0;

   if (brw->vb.upload.bo != NULL)
      dri_bo_unreference(brw->vb.upload.bo);
   brw->vb.upload.bo = dri_bo_alloc(brw->intel.bufmgr, "temporary VBO",
                                    size, 1);
}

static void get_space( struct brw_context *brw,
                       GLuint size,
                       dri_bo **bo_return,
                       GLuint *offset_return )
{
   size = ALIGN(size, 64);

   if (brw->vb.upload.bo == NULL ||
       brw->vb.upload.offset + size > brw->vb.upload.bo->size) {
      wrap_buffers(brw, size);
   }

   assert(*bo_return == NULL);
   dri_bo_reference(brw->vb.upload.bo);
   *bo_return = brw->vb.upload.bo;
   *offset_return = brw->vb.upload.offset;
   brw->vb.upload.offset += size;
}
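
/* Copy one client vertex array into the upload buffer.  If the
 * destination stride matches the source stride, the whole array is
 * copied with a single memcpy; otherwise each element is copied
 * individually to repack the data at dst_stride.  With kernel execbuf
 * fencing available the copy goes through a GTT mapping of the buffer,
 * otherwise through dri_bo_subdata().
 */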
static void
copy_array_to_vbo_array( struct brw_context *brw,
                         struct brw_vertex_element *element,
                         GLuint dst_stride)
{
   struct intel_context *intel = &brw->intel;
   GLuint size = element->count * dst_stride;

   get_space(brw, size, &element->bo, &element->offset);

   if (element->glarray->StrideB == 0) {
      assert(element->count == 1);
      element->stride = 0;
   } else {
      element->stride = dst_stride;
   }

   if (dst_stride == element->glarray->StrideB) {
      if (intel->intelScreen->kernel_exec_fencing) {
         drm_intel_gem_bo_map_gtt(element->bo);
         memcpy((char *)element->bo->virtual + element->offset,
                element->glarray->Ptr, size);
         drm_intel_gem_bo_unmap_gtt(element->bo);
      } else {
         dri_bo_subdata(element->bo,
                        element->offset,
                        size,
                        element->glarray->Ptr);
      }
   } else {
      char *dest;
      const unsigned char *src = element->glarray->Ptr;
      int i;

      if (intel->intelScreen->kernel_exec_fencing) {
         drm_intel_gem_bo_map_gtt(element->bo);
         dest = element->bo->virtual;
         dest += element->offset;

         for (i = 0; i < element->count; i++) {
            memcpy(dest, src, dst_stride);
            src += element->glarray->StrideB;
            dest += dst_stride;
         }

         drm_intel_gem_bo_unmap_gtt(element->bo);
      } else {
         char *data;

         data = _mesa_malloc(dst_stride * element->count);
         dest = data;

         for (i = 0; i < element->count; i++) {
            memcpy(dest, src, dst_stride);
            src += element->glarray->StrideB;
            dest += dst_stride;
         }

         dri_bo_subdata(element->bo,
                        element->offset,
                        size,
                        data);

         _mesa_free(data);
      }
   }
}
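
/* Walk the arrays the vertex shader reads: arrays that already live in
 * buffer objects are referenced in place, while client arrays are queued
 * for upload.  If all queued arrays share the position array's stride
 * and lie within one stride of its base pointer, they are treated as a
 * single interleaved block and uploaded with one copy.
 */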
static void brw_prepare_vertices(struct brw_context *brw)
{
   GLcontext *ctx = &brw->intel.ctx;
   struct intel_context *intel = intel_context(ctx);
   GLbitfield vs_inputs = brw->vs.prog_data->inputs_read;
   GLuint i;
   const unsigned char *ptr = NULL;
   GLuint interleave = 0;
   unsigned int min_index = brw->vb.min_index;
   unsigned int max_index = brw->vb.max_index;

   struct brw_vertex_element *upload[VERT_ATTRIB_MAX];
   GLuint nr_uploads = 0;

   /* First build an array of pointers to ve's in vb.inputs_read
    */
   if (0)
      _mesa_printf("%s %d..%d\n", __FUNCTION__, min_index, max_index);

   /* Accumulate the list of enabled arrays. */
   brw->vb.nr_enabled = 0;
   while (vs_inputs) {
      GLuint i = _mesa_ffsll(vs_inputs) - 1;
      struct brw_vertex_element *input = &brw->vb.inputs[i];

      vs_inputs &= ~(1 << i);
      brw->vb.enabled[brw->vb.nr_enabled++] = input;
   }

   /* XXX: In the rare cases where this happens we fallback all
    * the way to software rasterization, although a tnl fallback
    * would be sufficient.  I don't know of *any* real world
    * cases with > 17 vertex attributes enabled, so it probably
    * isn't an issue at this point.
    */
   if (brw->vb.nr_enabled >= BRW_VEP_MAX) {
      intel->Fallback = GL_TRUE; /* boolean, not bitfield */
      return;
   }

   for (i = 0; i < brw->vb.nr_enabled; i++) {
      struct brw_vertex_element *input = brw->vb.enabled[i];

      input->element_size = get_size(input->glarray->Type) * input->glarray->Size;

      if (_mesa_is_bufferobj(input->glarray->BufferObj)) {
         struct intel_buffer_object *intel_buffer =
            intel_buffer_object(input->glarray->BufferObj);

         /* Named buffer object: Just reference its contents directly. */
         dri_bo_unreference(input->bo);
         input->bo = intel_bufferobj_buffer(intel, intel_buffer,
                                            INTEL_READ);
         dri_bo_reference(input->bo);
         input->offset = (unsigned long)input->glarray->Ptr;
         input->stride = input->glarray->StrideB;
         input->count = input->glarray->_MaxElement;

         /* This is a common place to reach if the user mistakenly supplies
          * a pointer in place of a VBO offset.  If we just let it go through,
          * we may end up dereferencing a pointer beyond the bounds of the
          * GTT.  We would hope that the VBO's max_index would save us, but
          * Mesa appears to hand us min/max values not clipped to the
          * array object's _MaxElement, and _MaxElement frequently appears
          * to be wrong anyway.
          *
          * The VBO spec allows application termination in this case, and it's
          * probably a service to the poor programmer to do so rather than
          * trying to just not render.
          */
         assert(input->offset < input->bo->size);
      } else {
         input->count = input->glarray->StrideB ? max_index + 1 - min_index : 1;
         if (input->bo != NULL) {
            /* Already-uploaded vertex data is present from a previous
             * prepare_vertices, but we had to re-validate state due to
             * check_aperture failing and a new batch being produced.
             */
            continue;
         }

         /* Queue the buffer object up to be uploaded in the next pass,
          * when we've decided if we're doing interleaved or not.
          */
         if (input->attrib == VERT_ATTRIB_POS) {
            /* Position array not properly enabled:
             */
            if (input->glarray->StrideB == 0) {
               intel->Fallback = GL_TRUE; /* boolean, not bitfield */
               return;
            }

            interleave = input->glarray->StrideB;
            ptr = input->glarray->Ptr;
         }
         else if (interleave != input->glarray->StrideB ||
                  (const unsigned char *)input->glarray->Ptr - ptr < 0 ||
                  (const unsigned char *)input->glarray->Ptr - ptr > interleave)
         {
            interleave = 0;
         }

         upload[nr_uploads++] = input;

         /* We rebase drawing to start at element zero only when
          * varyings are not in vbos, which means we can end up
          * uploading non-varying arrays (stride != 0) when min_index
          * is zero.  This doesn't matter as the amount to upload is
          * the same for these arrays whether the draw call is rebased
          * or not - we just have to upload the one element.
          */
         assert(min_index == 0 || input->glarray->StrideB == 0);
      }
   }

   /* Handle any arrays to be uploaded. */
   if (nr_uploads > 1 && interleave && interleave <= 256) {
      /* All uploads are interleaved, so upload the arrays together as
       * interleaved.  First, upload the contents and set up upload[0].
       */
      copy_array_to_vbo_array(brw, upload[0], interleave);

      for (i = 1; i < nr_uploads; i++) {
         /* Then, just point upload[i] at upload[0]'s buffer. */
         upload[i]->stride = interleave;
         upload[i]->offset = upload[0]->offset +
            ((const unsigned char *)upload[i]->glarray->Ptr - ptr);
         upload[i]->bo = upload[0]->bo;
         dri_bo_reference(upload[i]->bo);
      }
   }
   else {
      /* Upload non-interleaved arrays */
      for (i = 0; i < nr_uploads; i++) {
         copy_array_to_vbo_array(brw, upload[i], upload[i]->element_size);
      }
   }

   brw_prepare_query_begin(brw);

   for (i = 0; i < brw->vb.nr_enabled; i++) {
      struct brw_vertex_element *input = brw->vb.enabled[i];

      brw_add_validated_bo(brw, input->bo);
   }
}
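
/* Emit the CMD_VERTEX_BUFFER and CMD_VERTEX_ELEMENT packets built from
 * the arrays set up by brw_prepare_vertices().  One hardware vertex
 * buffer and one vertex element are emitted per enabled input.
 */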
static void brw_emit_vertices(struct brw_context *brw)
{
   GLcontext *ctx = &brw->intel.ctx;
   struct intel_context *intel = intel_context(ctx);
   GLuint i;

   brw_emit_query_begin(brw);

   /* If the VS doesn't read any inputs (calculating vertex position from
    * a state variable for some reason, for example), emit a single pad
    * VERTEX_ELEMENT struct and bail.
    *
    * The stale VB state stays in place, but they don't do anything unless
    * a VE loads from them.
    */
   if (brw->vb.nr_enabled == 0) {
      BEGIN_BATCH(3);
      OUT_BATCH((CMD_VERTEX_ELEMENT << 16) | 1);
      OUT_BATCH((0 << BRW_VE0_INDEX_SHIFT) |
                BRW_VE0_VALID |
                (BRW_SURFACEFORMAT_R32G32B32A32_FLOAT << BRW_VE0_FORMAT_SHIFT) |
                (0 << BRW_VE0_SRC_OFFSET_SHIFT));
      OUT_BATCH((BRW_VE1_COMPONENT_STORE_0 << BRW_VE1_COMPONENT_0_SHIFT) |
                (BRW_VE1_COMPONENT_STORE_0 << BRW_VE1_COMPONENT_1_SHIFT) |
                (BRW_VE1_COMPONENT_STORE_0 << BRW_VE1_COMPONENT_2_SHIFT) |
                (BRW_VE1_COMPONENT_STORE_1_FLT << BRW_VE1_COMPONENT_3_SHIFT));
      ADVANCE_BATCH();
      return;
   }

   /* Now emit VB and VEP state packets.
    *
    * This still defines a hardware VB for each input, even if they
    * are interleaved or from the same VBO.  TBD if this makes a
    * performance difference.
    */
   BEGIN_BATCH(1 + brw->vb.nr_enabled * 4);
   OUT_BATCH((CMD_VERTEX_BUFFER << 16) |
             ((1 + brw->vb.nr_enabled * 4) - 2));

   for (i = 0; i < brw->vb.nr_enabled; i++) {
      struct brw_vertex_element *input = brw->vb.enabled[i];

      OUT_BATCH((i << BRW_VB0_INDEX_SHIFT) |
                BRW_VB0_ACCESS_VERTEXDATA |
                (input->stride << BRW_VB0_PITCH_SHIFT));
      OUT_RELOC(input->bo,
                I915_GEM_DOMAIN_VERTEX, 0,
                input->offset);
      if (intel->is_ironlake) {
         OUT_RELOC(input->bo,
                   I915_GEM_DOMAIN_VERTEX, 0,
                   input->bo->size - 1);
      } else
         OUT_BATCH(input->stride ? input->count : 0);
      OUT_BATCH(0); /* Instance data step rate */
   }
   ADVANCE_BATCH();

   BEGIN_BATCH(1 + brw->vb.nr_enabled * 2);
   OUT_BATCH((CMD_VERTEX_ELEMENT << 16) | ((1 + brw->vb.nr_enabled * 2) - 2));
   for (i = 0; i < brw->vb.nr_enabled; i++) {
      struct brw_vertex_element *input = brw->vb.enabled[i];
      uint32_t format = get_surface_type(input->glarray->Type,
                                         input->glarray->Size,
                                         input->glarray->Format,
                                         input->glarray->Normalized);
      uint32_t comp0 = BRW_VE1_COMPONENT_STORE_SRC;
      uint32_t comp1 = BRW_VE1_COMPONENT_STORE_SRC;
      uint32_t comp2 = BRW_VE1_COMPONENT_STORE_SRC;
      uint32_t comp3 = BRW_VE1_COMPONENT_STORE_SRC;

      switch (input->glarray->Size) {
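      /* The case labels below fall through deliberately: an array with
       * fewer than 4 components has each missing y/z component filled
       * with 0 and a missing w component filled with 1.0.
       */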
      case 0: comp0 = BRW_VE1_COMPONENT_STORE_0;
      case 1: comp1 = BRW_VE1_COMPONENT_STORE_0;
      case 2: comp2 = BRW_VE1_COMPONENT_STORE_0;
      case 3: comp3 = BRW_VE1_COMPONENT_STORE_1_FLT;
         break;
      }

      OUT_BATCH((i << BRW_VE0_INDEX_SHIFT) |
                BRW_VE0_VALID |
                (format << BRW_VE0_FORMAT_SHIFT) |
                (0 << BRW_VE0_SRC_OFFSET_SHIFT));

      if (intel->is_ironlake)
         OUT_BATCH((comp0 << BRW_VE1_COMPONENT_0_SHIFT) |
                   (comp1 << BRW_VE1_COMPONENT_1_SHIFT) |
                   (comp2 << BRW_VE1_COMPONENT_2_SHIFT) |
                   (comp3 << BRW_VE1_COMPONENT_3_SHIFT));
      else
         OUT_BATCH((comp0 << BRW_VE1_COMPONENT_0_SHIFT) |
                   (comp1 << BRW_VE1_COMPONENT_1_SHIFT) |
                   (comp2 << BRW_VE1_COMPONENT_2_SHIFT) |
                   (comp3 << BRW_VE1_COMPONENT_3_SHIFT) |
                   ((i * 4) << BRW_VE1_DST_OFFSET_SHIFT));
   }
   ADVANCE_BATCH();
}
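
/* Tracked-state atom: the prepare() hook runs during state validation,
 * before batch/aperture space is checked, which is why buffer objects
 * it references are handed to brw_add_validated_bo(); the emit() hook
 * writes the actual batchbuffer packets once validation has succeeded.
 */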
const struct brw_tracked_state brw_vertices = {
   .dirty = {
      .mesa = 0,
      .brw = BRW_NEW_BATCH | BRW_NEW_VERTICES,
      .cache = 0,
   },
   .prepare = brw_prepare_vertices,
   .emit = brw_emit_vertices,
};
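
/* Index buffers: user-space index arrays are copied into the upload
 * buffer; indices already in a buffer object are referenced in place,
 * unless they are misaligned for their element size, in which case
 * they are rebased into a temporary.
 */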
static void brw_prepare_indices(struct brw_context *brw)
{
   GLcontext *ctx = &brw->intel.ctx;
   struct intel_context *intel = &brw->intel;
   const struct _mesa_index_buffer *index_buffer = brw->ib.ib;
   GLuint ib_size;
   dri_bo *bo = NULL;
   struct gl_buffer_object *bufferobj;
   GLuint offset;
   GLuint ib_type_size;

   if (index_buffer == NULL)
      return;

   ib_type_size = get_size(index_buffer->type);
   ib_size = ib_type_size * index_buffer->count;
   bufferobj = index_buffer->obj;

   /* Turn into a proper VBO:
    */
   if (!_mesa_is_bufferobj(bufferobj)) {
      brw->ib.start_vertex_offset = 0;

      /* Get new bufferobj, offset:
       */
      get_space(brw, ib_size, &bo, &offset);

      /* Straight upload
       */
      if (intel->intelScreen->kernel_exec_fencing) {
         drm_intel_gem_bo_map_gtt(bo);
         memcpy((char *)bo->virtual + offset, index_buffer->ptr, ib_size);
         drm_intel_gem_bo_unmap_gtt(bo);
      } else
         dri_bo_subdata(bo, offset, ib_size, index_buffer->ptr);
   } else {
      offset = (GLuint) (unsigned long) index_buffer->ptr;
      brw->ib.start_vertex_offset = 0;

      /* If the index buffer isn't aligned to its element size, we have to
       * rebase it into a temporary.
       */
      if ((get_size(index_buffer->type) - 1) & offset) {
         GLubyte *map = ctx->Driver.MapBuffer(ctx,
                                              GL_ELEMENT_ARRAY_BUFFER_ARB,
                                              GL_DYNAMIC_DRAW_ARB,
                                              bufferobj);
         map += offset;

         get_space(brw, ib_size, &bo, &offset);

         dri_bo_subdata(bo, offset, ib_size, map);

         ctx->Driver.UnmapBuffer(ctx, GL_ELEMENT_ARRAY_BUFFER_ARB, bufferobj);
      } else {
         bo = intel_bufferobj_buffer(intel, intel_buffer_object(bufferobj),
                                     INTEL_READ);
         dri_bo_reference(bo);

         /* Use CMD_3D_PRIM's start_vertex_offset to avoid re-uploading
          * the index buffer state when we're just moving the start index
          * of our drawing.
          */
         brw->ib.start_vertex_offset = offset / ib_type_size;
         offset = 0;
         ib_size = bo->size;
      }
   }

   if (brw->ib.bo != bo ||
       brw->ib.offset != offset ||
       brw->ib.size != ib_size)
   {
      drm_intel_bo_unreference(brw->ib.bo);
      brw->ib.bo = bo;
      brw->ib.offset = offset;
      brw->ib.size = ib_size;

      brw->state.dirty.brw |= BRW_NEW_INDEX_BUFFER;
   } else {
      drm_intel_bo_unreference(bo);
   }

   brw_add_validated_bo(brw, brw->ib.bo);
}

const struct brw_tracked_state brw_indices = {
   .dirty = {
      .mesa = 0,
      .brw = BRW_NEW_INDICES,
      .cache = 0,
   },
   .prepare = brw_prepare_indices,
};
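
/* Emit the CMD_INDEX_BUFFER packet: a header dword, a relocation for
 * the start of the index data, a relocation for its last byte, and a
 * pad dword.
 */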
static void brw_emit_index_buffer(struct brw_context *brw)
{
   struct intel_context *intel = &brw->intel;
   const struct _mesa_index_buffer *index_buffer = brw->ib.ib;

   if (index_buffer == NULL)
      return;

   /* Emit the indexbuffer packet:
    */
   {
      struct brw_indexbuffer ib;

      memset(&ib, 0, sizeof(ib));

      ib.header.bits.opcode = CMD_INDEX_BUFFER;
      ib.header.bits.length = sizeof(ib)/4 - 2;
      ib.header.bits.index_format = get_index_type(index_buffer->type);
      ib.header.bits.cut_index_enable = 0;

      BEGIN_BATCH(4);
      OUT_BATCH( ib.header.dword );
      OUT_RELOC(brw->ib.bo,
                I915_GEM_DOMAIN_VERTEX, 0,
                brw->ib.offset);
      OUT_RELOC(brw->ib.bo,
                I915_GEM_DOMAIN_VERTEX, 0,
                brw->ib.offset + brw->ib.size - 1);
      OUT_BATCH( 0 );
      ADVANCE_BATCH();
   }
}

const struct brw_tracked_state brw_index_buffer = {
   .dirty = {
      .mesa = 0,
      .brw = BRW_NEW_BATCH | BRW_NEW_INDEX_BUFFER,
      .cache = 0,
   },
   .emit = brw_emit_index_buffer,
};