/*
 * Copyright 2003 VMware, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#include <sys/errno.h>

#include "main/glheader.h"
#include "main/context.h"
#include "main/condrender.h"
#include "main/samplerobj.h"
#include "main/state.h"
#include "main/enums.h"
#include "main/macros.h"
#include "main/transformfeedback.h"
#include "main/framebuffer.h"

#include "tnl/tnl.h"
#include "vbo/vbo_context.h"
#include "swrast/swrast.h"
#include "swrast_setup/swrast_setup.h"
#include "drivers/common/meta.h"

#include "brw_blorp.h"
#include "brw_draw.h"
#include "brw_defines.h"
#include "brw_context.h"
#include "brw_state.h"

#include "intel_batchbuffer.h"
#include "intel_buffers.h"
#include "intel_fbo.h"
#include "intel_mipmap_tree.h"
#include "intel_buffer_objects.h"

#define FILE_DEBUG_FLAG DEBUG_PRIMS
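/* DBG() calls in this file are gated on DEBUG_PRIMS, so they only produce
 * output when that debug flag has been enabled (e.g. via the INTEL_DEBUG
 * environment variable).
 */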
static const GLuint prim_to_hw_prim[GL_TRIANGLE_STRIP_ADJACENCY+1] = {
   [GL_POINTS] = _3DPRIM_POINTLIST,
   [GL_LINES] = _3DPRIM_LINELIST,
   [GL_LINE_LOOP] = _3DPRIM_LINELOOP,
   [GL_LINE_STRIP] = _3DPRIM_LINESTRIP,
   [GL_TRIANGLES] = _3DPRIM_TRILIST,
   [GL_TRIANGLE_STRIP] = _3DPRIM_TRISTRIP,
   [GL_TRIANGLE_FAN] = _3DPRIM_TRIFAN,
   [GL_QUADS] = _3DPRIM_QUADLIST,
   [GL_QUAD_STRIP] = _3DPRIM_QUADSTRIP,
   [GL_POLYGON] = _3DPRIM_POLYGON,
   [GL_LINES_ADJACENCY] = _3DPRIM_LINELIST_ADJ,
   [GL_LINE_STRIP_ADJACENCY] = _3DPRIM_LINESTRIP_ADJ,
   [GL_TRIANGLES_ADJACENCY] = _3DPRIM_TRILIST_ADJ,
   [GL_TRIANGLE_STRIP_ADJACENCY] = _3DPRIM_TRISTRIP_ADJ,
};
static const GLenum reduced_prim[GL_POLYGON+1] = {
   [GL_POINTS] = GL_POINTS,
   [GL_LINES] = GL_LINES,
   [GL_LINE_LOOP] = GL_LINES,
   [GL_LINE_STRIP] = GL_LINES,
   [GL_TRIANGLES] = GL_TRIANGLES,
   [GL_TRIANGLE_STRIP] = GL_TRIANGLES,
   [GL_TRIANGLE_FAN] = GL_TRIANGLES,
   [GL_QUADS] = GL_TRIANGLES,
   [GL_QUAD_STRIP] = GL_TRIANGLES,
   [GL_POLYGON] = GL_TRIANGLES
};
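/* Map a GL primitive mode to the hardware 3DPRIM topology.  Values at or
 * above BRW_PRIM_OFFSET are assumed to already be hardware topologies
 * (passed in by internal/meta draws) and are simply un-offset.
 */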
uint32_t
get_hw_prim_for_gl_prim(int mode)
{
   if (mode >= BRW_PRIM_OFFSET)
      return mode - BRW_PRIM_OFFSET;

   assert(mode < ARRAY_SIZE(prim_to_hw_prim));
   return prim_to_hw_prim[mode];
}
/* When the primitive changes, set a state bit and re-validate.  Not
 * the nicest and would rather deal with this by having all the
 * programs be immune to the active primitive (ie. cope with all
 * possibilities).  That may not be realistic however.
 */
static void
brw_set_prim(struct brw_context *brw, const struct _mesa_prim *prim)
{
   struct gl_context *ctx = &brw->ctx;
   uint32_t hw_prim = get_hw_prim_for_gl_prim(prim->mode);

   DBG("PRIM: %s\n", _mesa_enum_to_string(prim->mode));

   /* Slight optimization to avoid the GS program when not needed:
    */
   if (prim->mode == GL_QUAD_STRIP &&
       ctx->Light.ShadeModel != GL_FLAT &&
       ctx->Polygon.FrontMode == GL_FILL &&
       ctx->Polygon.BackMode == GL_FILL)
      hw_prim = _3DPRIM_TRISTRIP;

   if (prim->mode == GL_QUADS && prim->count == 4 &&
       ctx->Light.ShadeModel != GL_FLAT &&
       ctx->Polygon.FrontMode == GL_FILL &&
       ctx->Polygon.BackMode == GL_FILL) {
      hw_prim = _3DPRIM_TRIFAN;
   }

   if (hw_prim != brw->primitive) {
      brw->primitive = hw_prim;
      brw->ctx.NewDriverState |= BRW_NEW_PRIMITIVE;

      if (reduced_prim[prim->mode] != brw->reduced_primitive) {
         brw->reduced_primitive = reduced_prim[prim->mode];
         brw->ctx.NewDriverState |= BRW_NEW_REDUCED_PRIMITIVE;
      }
   }
}
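/* On Gen6+ the quad tricks above aren't needed (pre-Gen6 hardware required a
 * GS program to render quads; see the comment above trim() below), so this
 * variant just records the topology change.
 */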
static void
gen6_set_prim(struct brw_context *brw, const struct _mesa_prim *prim)
{
   DBG("PRIM: %s\n", _mesa_enum_to_string(prim->mode));

   const uint32_t hw_prim = get_hw_prim_for_gl_prim(prim->mode);
   if (hw_prim != brw->primitive) {
      brw->primitive = hw_prim;
      brw->ctx.NewDriverState |= BRW_NEW_PRIMITIVE;
   }
}
/**
 * The hardware is capable of removing dangling vertices on its own; however,
 * prior to Gen6, we sometimes convert quads into trifans (and quad strips
 * into tristrips), since pre-Gen6 hardware requires a GS to render quads.
 * This function manually trims dangling vertices from a draw call involving
 * quads so that those dangling vertices won't get drawn when we convert to
 * trifans/tristrips.
 */
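/* For example, trim(GL_QUADS, 7) == 4 (one complete quad) and
 * trim(GL_QUAD_STRIP, 5) == 4; counts too small for a full quad trim to 0.
 */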
static GLuint
trim(GLenum prim, GLuint length)
{
   if (prim == GL_QUAD_STRIP)
      return length > 3 ? (length - length % 2) : 0;
   else if (prim == GL_QUADS)
      return length - length % 4;
   else
      return length;
}
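/* Emit the 3DPRIMITIVE command for one primitive, including the MI register
 * loads needed for indirect draws and, on Gen7+, the optional predicate
 * enable.
 */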
static void
brw_emit_prim(struct brw_context *brw,
              const struct _mesa_prim *prim,
              uint32_t hw_prim)
{
   int verts_per_instance;
   int vertex_access_type;
   int indirect_flag;

   DBG("PRIM: %s %d %d\n", _mesa_enum_to_string(prim->mode),
       prim->start, prim->count);

   int start_vertex_location = prim->start;
   int base_vertex_location = prim->basevertex;

   if (prim->indexed) {
      vertex_access_type = brw->gen >= 7 ?
         GEN7_3DPRIM_VERTEXBUFFER_ACCESS_RANDOM :
         GEN4_3DPRIM_VERTEXBUFFER_ACCESS_RANDOM;
      start_vertex_location += brw->ib.start_vertex_offset;
      base_vertex_location += brw->vb.start_vertex_bias;
   } else {
      vertex_access_type = brw->gen >= 7 ?
         GEN7_3DPRIM_VERTEXBUFFER_ACCESS_SEQUENTIAL :
         GEN4_3DPRIM_VERTEXBUFFER_ACCESS_SEQUENTIAL;
      start_vertex_location += brw->vb.start_vertex_bias;
   }

   /* We only need to trim the primitive count on pre-Gen6. */
   if (brw->gen < 6)
      verts_per_instance = trim(prim->mode, prim->count);
   else
      verts_per_instance = prim->count;

   /* If nothing to emit, just return. */
   if (verts_per_instance == 0 && !prim->is_indirect)
      return;

   /* If we're set to always flush, do it before and after the primitive emit.
    * We want to catch both missed flushes that hurt instruction/state cache
    * and missed flushes of the render cache as it heads to other parts of
    * the pipeline besides the draw code.
    */
   if (brw->always_flush_cache)
      brw_emit_mi_flush(brw);

   /* If indirect, emit a bunch of loads from the indirect BO. */
   if (prim->is_indirect) {
      struct gl_buffer_object *indirect_buffer = brw->ctx.DrawIndirectBuffer;
      drm_intel_bo *bo = intel_bufferobj_buffer(brw,
            intel_buffer_object(indirect_buffer),
            prim->indirect_offset, 5 * sizeof(GLuint));

      indirect_flag = GEN7_3DPRIM_INDIRECT_PARAMETER_ENABLE;

      brw_load_register_mem(brw, GEN7_3DPRIM_VERTEX_COUNT, bo,
                            I915_GEM_DOMAIN_VERTEX, 0,
                            prim->indirect_offset + 0);
      brw_load_register_mem(brw, GEN7_3DPRIM_INSTANCE_COUNT, bo,
                            I915_GEM_DOMAIN_VERTEX, 0,
                            prim->indirect_offset + 4);

      brw_load_register_mem(brw, GEN7_3DPRIM_START_VERTEX, bo,
                            I915_GEM_DOMAIN_VERTEX, 0,
                            prim->indirect_offset + 8);
      if (prim->indexed) {
         brw_load_register_mem(brw, GEN7_3DPRIM_BASE_VERTEX, bo,
                               I915_GEM_DOMAIN_VERTEX, 0,
                               prim->indirect_offset + 12);
         brw_load_register_mem(brw, GEN7_3DPRIM_START_INSTANCE, bo,
                               I915_GEM_DOMAIN_VERTEX, 0,
                               prim->indirect_offset + 16);
      } else {
         brw_load_register_mem(brw, GEN7_3DPRIM_START_INSTANCE, bo,
                               I915_GEM_DOMAIN_VERTEX, 0,
                               prim->indirect_offset + 12);

         /* Non-indexed indirect draws have no base vertex, so load 0. */
         BEGIN_BATCH(3);
         OUT_BATCH(MI_LOAD_REGISTER_IMM | (3 - 2));
         OUT_BATCH(GEN7_3DPRIM_BASE_VERTEX);
         OUT_BATCH(0);
         ADVANCE_BATCH();
      }
   } else {
      indirect_flag = 0;
   }

   BEGIN_BATCH(brw->gen >= 7 ? 7 : 6);

   if (brw->gen >= 7) {
      const int predicate_enable =
         (brw->predicate.state == BRW_PREDICATE_STATE_USE_BIT)
         ? GEN7_3DPRIM_PREDICATE_ENABLE : 0;

      OUT_BATCH(CMD_3D_PRIM << 16 | (7 - 2) | indirect_flag | predicate_enable);
      OUT_BATCH(hw_prim | vertex_access_type);
   } else {
      OUT_BATCH(CMD_3D_PRIM << 16 | (6 - 2) |
                hw_prim << GEN4_3DPRIM_TOPOLOGY_TYPE_SHIFT |
                vertex_access_type);
   }
   OUT_BATCH(verts_per_instance);
   OUT_BATCH(start_vertex_location);
   OUT_BATCH(prim->num_instances);
   OUT_BATCH(prim->base_instance);
   OUT_BATCH(base_vertex_location);
   ADVANCE_BATCH();

   if (brw->always_flush_cache)
      brw_emit_mi_flush(brw);
}
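/* Point the per-attribute vertex input state at the current client arrays,
 * drop references to the previous vertex buffers, and (pre-Haswell) compute
 * the vertex format workaround flags.
 */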
static void
brw_merge_inputs(struct brw_context *brw,
                 const struct gl_client_array *arrays[])
{
   const struct gl_context *ctx = &brw->ctx;
   GLuint i;

   for (i = 0; i < brw->vb.nr_buffers; i++) {
      drm_intel_bo_unreference(brw->vb.buffers[i].bo);
      brw->vb.buffers[i].bo = NULL;
   }
   brw->vb.nr_buffers = 0;

   for (i = 0; i < VERT_ATTRIB_MAX; i++) {
      brw->vb.inputs[i].buffer = -1;
      brw->vb.inputs[i].glarray = arrays[i];
   }

   if (brw->gen < 8 && !brw->is_haswell) {
      struct gl_program *vp = &ctx->VertexProgram._Current->Base;
      /* Prior to Haswell, the hardware can't natively support GL_FIXED or
       * 2_10_10_10_REV vertex formats.  Set appropriate workaround flags.
       */
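      /* For example, a normalized, signed GL_INT_2_10_10_10_REV attribute
       * ends up with BRW_ATTRIB_WA_SIGN | BRW_ATTRIB_WA_NORMALIZE; the
       * BRW_NEW_VS_ATTRIB_WORKAROUNDS flag set below tells the vertex shader
       * path to apply the matching fixups.
       */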
      for (i = 0; i < VERT_ATTRIB_MAX; i++) {
         if (!(vp->InputsRead & BITFIELD64_BIT(i)))
            continue;

         uint8_t wa_flags = 0;

         switch (brw->vb.inputs[i].glarray->Type) {

         case GL_FIXED:
            wa_flags = brw->vb.inputs[i].glarray->Size;
            break;

         case GL_INT_2_10_10_10_REV:
            wa_flags |= BRW_ATTRIB_WA_SIGN;
            /* fallthrough */

         case GL_UNSIGNED_INT_2_10_10_10_REV:
            if (brw->vb.inputs[i].glarray->Format == GL_BGRA)
               wa_flags |= BRW_ATTRIB_WA_BGRA;

            if (brw->vb.inputs[i].glarray->Normalized)
               wa_flags |= BRW_ATTRIB_WA_NORMALIZE;
            else if (!brw->vb.inputs[i].glarray->Integer)
               wa_flags |= BRW_ATTRIB_WA_SCALE;

            break;
         }

         if (brw->vb.attrib_wa_flags[i] != wa_flags) {
            brw->vb.attrib_wa_flags[i] = wa_flags;
            brw->ctx.NewDriverState |= BRW_NEW_VS_ATTRIB_WORKAROUNDS;
         }
      }
   }
}
/**
 * \brief Call this after drawing to mark which buffers need resolving
 *
 * If the depth buffer was written to and if it has an accompanying HiZ
 * buffer, then mark that it needs a depth resolve.
 *
 * If the color buffer is a multisample window system buffer, then
 * mark that it needs a downsample.
 *
 * Also mark any render targets which will be textured as needing a render
 * cache flush.
 */
static void
brw_postdraw_set_buffers_need_resolve(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   struct gl_framebuffer *fb = ctx->DrawBuffer;

   struct intel_renderbuffer *front_irb = NULL;
   struct intel_renderbuffer *back_irb = intel_get_renderbuffer(fb, BUFFER_BACK_LEFT);
   struct intel_renderbuffer *depth_irb = intel_get_renderbuffer(fb, BUFFER_DEPTH);
   struct intel_renderbuffer *stencil_irb = intel_get_renderbuffer(fb, BUFFER_STENCIL);
   struct gl_renderbuffer_attachment *depth_att = &fb->Attachment[BUFFER_DEPTH];

   if (_mesa_is_front_buffer_drawing(fb))
      front_irb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);

   if (front_irb)
      front_irb->need_downsample = true;
   if (back_irb)
      back_irb->need_downsample = true;
   if (depth_irb && ctx->Depth.Mask) {
      intel_renderbuffer_att_set_needs_depth_resolve(depth_att);
      brw_render_cache_set_add_bo(brw, depth_irb->mt->bo);
   }

   if (ctx->Extensions.ARB_stencil_texturing &&
       stencil_irb && ctx->Stencil._WriteEnabled) {
      brw_render_cache_set_add_bo(brw, stencil_irb->mt->bo);
   }

   for (unsigned i = 0; i < fb->_NumColorDrawBuffers; i++) {
      struct intel_renderbuffer *irb =
         intel_renderbuffer(fb->_ColorDrawBuffers[i]);

      if (irb)
         brw_render_cache_set_add_bo(brw, irb->mt->bo);
   }
}
/* May fail if out of video memory for texture or vbo upload, or on
 * fallback conditions.
 */
static void
brw_try_draw_prims(struct gl_context *ctx,
                   const struct gl_client_array *arrays[],
                   const struct _mesa_prim *prims,
                   GLuint nr_prims,
                   const struct _mesa_index_buffer *ib,
                   GLuint min_index,
                   GLuint max_index,
                   struct gl_buffer_object *indirect)
{
   struct brw_context *brw = brw_context(ctx);
   GLuint i;
   bool fail_next = false;

   _mesa_update_state(ctx);

   /* Find the highest sampler unit used by each shader program.  A bit-count
    * won't work since ARB programs use the texture unit number as the sampler
    * index.
    */
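   /* For example, an ARB fragment program that only samples from texture
    * unit 7 still needs sampler_count == 8, since _mesa_fls() returns one
    * past the index of the highest bit set in SamplersUsed.
    */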
   brw->wm.base.sampler_count =
      _mesa_fls(ctx->FragmentProgram._Current->Base.SamplersUsed);
   brw->gs.base.sampler_count = ctx->GeometryProgram._Current ?
      _mesa_fls(ctx->GeometryProgram._Current->Base.SamplersUsed) : 0;
   brw->vs.base.sampler_count =
      _mesa_fls(ctx->VertexProgram._Current->Base.SamplersUsed);

   /* We have to validate the textures *before* checking for fallbacks;
    * otherwise, the software fallback won't be able to rely on the
    * texture state, the firstLevel and lastLevel fields won't be
    * set in the intel texture object (they'll both be 0), and the
    * software fallback will segfault if it attempts to access any
    * texture level other than level 0.
    */
   brw_validate_textures(brw);

   intel_prepare_render(brw);

   /* This workaround has to happen outside of brw_upload_render_state()
    * because it may flush the batchbuffer for a blit, affecting the state
    * flags.
    */
   brw_workaround_depthstencil_alignment(brw, 0);

   /* Bind all inputs, derive varying and size information:
    */
   brw_merge_inputs(brw, arrays);

   brw->ib.ib = ib;
   brw->ctx.NewDriverState |= BRW_NEW_INDICES;

   brw->vb.min_index = min_index;
   brw->vb.max_index = max_index;
   brw->ctx.NewDriverState |= BRW_NEW_VERTICES;

   for (i = 0; i < nr_prims; i++) {
      int estimated_max_prim_size;
      const int sampler_state_size = 16;

      estimated_max_prim_size = 512; /* batchbuffer commands */
      estimated_max_prim_size += BRW_MAX_TEX_UNIT *
         (sampler_state_size + sizeof(struct gen5_sampler_default_color));
      estimated_max_prim_size += 1024; /* gen6 VS push constants */
      estimated_max_prim_size += 1024; /* gen6 WM push constants */
      estimated_max_prim_size += 512; /* misc. pad */

      /* Flush the batch if it's approaching full, so that we don't wrap while
       * we've got validated state that needs to be in the same batch as the
       * primitives.
       */
      intel_batchbuffer_require_space(brw, estimated_max_prim_size, RENDER_RING);
      intel_batchbuffer_save_state(brw);

      if (brw->num_instances != prims[i].num_instances ||
          brw->basevertex != prims[i].basevertex) {
         brw->num_instances = prims[i].num_instances;
         brw->basevertex = prims[i].basevertex;
         if (i > 0) { /* For i == 0 we just did this before the loop */
            brw->ctx.NewDriverState |= BRW_NEW_VERTICES;
            brw_merge_inputs(brw, arrays);
         }
      }

      brw->draw.gl_basevertex =
         prims[i].indexed ? prims[i].basevertex : prims[i].start;

      drm_intel_bo_unreference(brw->draw.draw_params_bo);

      if (prims[i].is_indirect) {
         /* Point draw_params_bo at the indirect buffer. */
         brw->draw.draw_params_bo =
            intel_buffer_object(ctx->DrawIndirectBuffer)->buffer;
         drm_intel_bo_reference(brw->draw.draw_params_bo);
         brw->draw.draw_params_offset =
            prims[i].indirect_offset + (prims[i].indexed ? 12 : 8);
      } else {
         /* Set draw_params_bo to NULL so brw_prepare_vertices knows it
          * has to upload gl_BaseVertex and such if they're needed.
          */
         brw->draw.draw_params_bo = NULL;
         brw->draw.draw_params_offset = 0;
      }

      if (brw->gen < 6)
         brw_set_prim(brw, &prims[i]);
      else
         gen6_set_prim(brw, &prims[i]);

retry:

      /* Note that before the loop, brw->ctx.NewDriverState was set to != 0, and
       * that the state updated in the loop outside of this block is that in
       * *_set_prim or intel_batchbuffer_flush(), which only impacts
       * brw->ctx.NewDriverState.
       */
      if (brw->ctx.NewDriverState) {
         brw->no_batch_wrap = true;
         brw_upload_render_state(brw);
      }

      brw_emit_prim(brw, &prims[i], brw->primitive);

      brw->no_batch_wrap = false;

      if (dri_bufmgr_check_aperture_space(&brw->batch.bo, 1)) {
         if (!fail_next) {
            intel_batchbuffer_reset_to_saved(brw);
            intel_batchbuffer_flush(brw);
            fail_next = true;
            goto retry;
         } else {
            int ret = intel_batchbuffer_flush(brw);
            WARN_ONCE(ret == -ENOSPC,
                      "i965: Single primitive emit exceeded "
                      "available aperture space\n");
         }
      }

      /* Now that we know we haven't run out of aperture space, we can safely
       * reset the dirty bits.
       */
      if (brw->ctx.NewDriverState)
         brw_render_state_finished(brw);
   }

   if (brw->always_flush_batch)
      intel_batchbuffer_flush(brw);

   brw_state_cache_check_size(brw);
   brw_postdraw_set_buffers_need_resolve(brw);
}
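/* The vbo module's draw entry point.  Checks conditional rendering,
 * primitive restart and non-GL_RENDER render modes, then hands the draw off
 * to brw_try_draw_prims().
 */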
void
brw_draw_prims(struct gl_context *ctx,
               const struct _mesa_prim *prims,
               GLuint nr_prims,
               const struct _mesa_index_buffer *ib,
               GLboolean index_bounds_valid,
               GLuint min_index,
               GLuint max_index,
               struct gl_transform_feedback_object *unused_tfb_object,
               unsigned stream,
               struct gl_buffer_object *indirect)
{
   struct brw_context *brw = brw_context(ctx);
   const struct gl_client_array **arrays = ctx->Array._DrawArrays;

   assert(unused_tfb_object == NULL);

   if (!brw_check_conditional_render(brw))
      return;

   /* Handle primitive restart if needed */
   if (brw_handle_primitive_restart(ctx, prims, nr_prims, ib, indirect)) {
      /* The draw was handled, so we can exit now */
      return;
   }

   /* Do GL_SELECT and GL_FEEDBACK rendering using swrast, even though it
    * won't support all the extensions we support.
    */
   if (ctx->RenderMode != GL_RENDER) {
      perf_debug("%s render mode not supported in hardware\n",
                 _mesa_enum_to_string(ctx->RenderMode));
      _swsetup_Wakeup(ctx);
      _tnl_draw_prims(ctx, prims, nr_prims, ib,
                      index_bounds_valid, min_index, max_index, NULL, 0, NULL);
      return;
   }

   /* If we're going to have to upload any of the user's vertex arrays, then
    * get the minimum and maximum of their index buffer so we know what range
    * to upload.
    */
   if (!index_bounds_valid && !vbo_all_varyings_in_vbos(arrays)) {
      perf_debug("Scanning index buffer to compute index buffer bounds. "
                 "Use glDrawRangeElements() to avoid this.\n");
      vbo_get_minmax_indices(ctx, prims, ib, &min_index, &max_index, nr_prims);
   }

   /* Try drawing with the hardware, but don't do anything else if we can't
    * manage it.  swrast doesn't support our featureset, so we can't fall back
    * to it.
    */
   brw_try_draw_prims(ctx, arrays, prims, nr_prims, ib, min_index, max_index,
                      indirect);
}
void
brw_draw_init(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   struct vbo_context *vbo = vbo_context(ctx);

   /* Register our drawing function:
    */
   vbo->draw_prims = brw_draw_prims;

   for (int i = 0; i < VERT_ATTRIB_MAX; i++)
      brw->vb.inputs[i].buffer = -1;
   brw->vb.nr_buffers = 0;
   brw->vb.nr_enabled = 0;
}
void
brw_draw_destroy(struct brw_context *brw)
{
   unsigned i;

   for (i = 0; i < brw->vb.nr_buffers; i++) {
      drm_intel_bo_unreference(brw->vb.buffers[i].bo);
      brw->vb.buffers[i].bo = NULL;
   }
   brw->vb.nr_buffers = 0;

   for (i = 0; i < brw->vb.nr_enabled; i++) {
      brw->vb.enabled[i]->buffer = -1;
   }
   brw->vb.nr_enabled = 0;

   drm_intel_bo_unreference(brw->ib.bo);
   brw->ib.bo = NULL;
}