/*
 * Copyright 2003 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <sys/errno.h>

#include "main/context.h"
#include "main/condrender.h"
#include "main/samplerobj.h"
#include "main/state.h"
#include "main/enums.h"
#include "main/macros.h"
#include "main/transformfeedback.h"
#include "main/framebuffer.h"

#include "vbo/vbo_context.h"

#include "swrast/swrast.h"
#include "swrast_setup/swrast_setup.h"
#include "drivers/common/meta.h"

#include "brw_blorp.h"
#include "brw_draw.h"
#include "brw_defines.h"
#include "brw_context.h"
#include "brw_state.h"

#include "intel_batchbuffer.h"
#include "intel_buffers.h"
#include "intel_fbo.h"
#include "intel_mipmap_tree.h"
#include "intel_buffer_objects.h"

#define FILE_DEBUG_FLAG DEBUG_PRIMS
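
/* Map each GL primitive mode to the corresponding 3DPRIM topology code
 * used in the hardware's 3DPRIMITIVE command.
 */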
static const GLuint prim_to_hw_prim[GL_TRIANGLE_STRIP_ADJACENCY+1] = {
   [GL_POINTS] = _3DPRIM_POINTLIST,
   [GL_LINES] = _3DPRIM_LINELIST,
   [GL_LINE_LOOP] = _3DPRIM_LINELOOP,
   [GL_LINE_STRIP] = _3DPRIM_LINESTRIP,
   [GL_TRIANGLES] = _3DPRIM_TRILIST,
   [GL_TRIANGLE_STRIP] = _3DPRIM_TRISTRIP,
   [GL_TRIANGLE_FAN] = _3DPRIM_TRIFAN,
   [GL_QUADS] = _3DPRIM_QUADLIST,
   [GL_QUAD_STRIP] = _3DPRIM_QUADSTRIP,
   [GL_POLYGON] = _3DPRIM_POLYGON,
   [GL_LINES_ADJACENCY] = _3DPRIM_LINELIST_ADJ,
   [GL_LINE_STRIP_ADJACENCY] = _3DPRIM_LINESTRIP_ADJ,
   [GL_TRIANGLES_ADJACENCY] = _3DPRIM_TRILIST_ADJ,
   [GL_TRIANGLE_STRIP_ADJACENCY] = _3DPRIM_TRISTRIP_ADJ,
};
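
/* Reduce a primitive mode to the class of primitive the hardware actually
 * rasterizes (points, lines, or triangles), so that state which only
 * depends on that class can be flagged via BRW_NEW_REDUCED_PRIMITIVE.
 */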
static const GLenum reduced_prim[GL_POLYGON+1] = {
   [GL_POINTS] = GL_POINTS,
   [GL_LINES] = GL_LINES,
   [GL_LINE_LOOP] = GL_LINES,
   [GL_LINE_STRIP] = GL_LINES,
   [GL_TRIANGLES] = GL_TRIANGLES,
   [GL_TRIANGLE_STRIP] = GL_TRIANGLES,
   [GL_TRIANGLE_FAN] = GL_TRIANGLES,
   [GL_QUADS] = GL_TRIANGLES,
   [GL_QUAD_STRIP] = GL_TRIANGLES,
   [GL_POLYGON] = GL_TRIANGLES,
};
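
/* Translate a GL primitive mode to a hardware topology code.  Modes at or
 * above BRW_PRIM_OFFSET are driver-internal primitives that already encode
 * the hardware value directly.
 */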
uint32_t
get_hw_prim_for_gl_prim(int mode)
{
   if (mode >= BRW_PRIM_OFFSET)
      return mode - BRW_PRIM_OFFSET;

   assert(mode < ARRAY_SIZE(prim_to_hw_prim));
   return prim_to_hw_prim[mode];
}

/* When the primitive changes, set a state bit and re-validate.  This isn't
 * the nicest approach; ideally all the programs would be immune to the
 * active primitive (i.e. cope with all possibilities), but that may not be
 * realistic.
 */
static void
brw_set_prim(struct brw_context *brw, const struct _mesa_prim *prim)
{
   struct gl_context *ctx = &brw->ctx;
   uint32_t hw_prim = get_hw_prim_for_gl_prim(prim->mode);

   DBG("PRIM: %s\n", _mesa_enum_to_string(prim->mode));

   /* Slight optimization to avoid the GS program when not needed:
    */
   if (prim->mode == GL_QUAD_STRIP &&
       ctx->Light.ShadeModel != GL_FLAT &&
       ctx->Polygon.FrontMode == GL_FILL &&
       ctx->Polygon.BackMode == GL_FILL)
      hw_prim = _3DPRIM_TRISTRIP;

   if (prim->mode == GL_QUADS && prim->count == 4 &&
       ctx->Light.ShadeModel != GL_FLAT &&
       ctx->Polygon.FrontMode == GL_FILL &&
       ctx->Polygon.BackMode == GL_FILL) {
      hw_prim = _3DPRIM_TRIFAN;
   }

   if (hw_prim != brw->primitive) {
      brw->primitive = hw_prim;
      brw->ctx.NewDriverState |= BRW_NEW_PRIMITIVE;

      if (reduced_prim[prim->mode] != brw->reduced_primitive) {
         brw->reduced_primitive = reduced_prim[prim->mode];
         brw->ctx.NewDriverState |= BRW_NEW_REDUCED_PRIMITIVE;
      }
   }
}
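
/* Gen6+ variant of brw_set_prim().  Quads no longer need lowering here, but
 * GL_PATCHES maps to a patch-list topology sized by the current patch
 * vertex count.
 */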
static void
gen6_set_prim(struct brw_context *brw, const struct _mesa_prim *prim)
{
   const struct gl_context *ctx = &brw->ctx;
   uint32_t hw_prim;

   DBG("PRIM: %s\n", _mesa_enum_to_string(prim->mode));

   if (prim->mode == GL_PATCHES) {
      hw_prim = _3DPRIM_PATCHLIST(ctx->TessCtrlProgram.patch_vertices);
   } else {
      hw_prim = get_hw_prim_for_gl_prim(prim->mode);
   }

   if (hw_prim != brw->primitive) {
      brw->primitive = hw_prim;
      brw->ctx.NewDriverState |= BRW_NEW_PRIMITIVE;
      if (prim->mode == GL_PATCHES)
         brw->ctx.NewDriverState |= BRW_NEW_PATCH_PRIMITIVE;
   }
}

/**
 * The hardware is capable of removing dangling vertices on its own; however,
 * prior to Gen6, we sometimes convert quads into trifans (and quad strips
 * into tristrips), since pre-Gen6 hardware requires a GS to render quads.
 * This function manually trims dangling vertices from a draw call involving
 * quads so that those dangling vertices won't get drawn when we convert to
 * trifans/tristrips (e.g. a GL_QUADS draw of 7 vertices is trimmed to 4).
 */
static GLuint
trim(GLenum prim, GLuint length)
{
   if (prim == GL_QUAD_STRIP)
      return length > 3 ? (length - length % 2) : 0;
   else if (prim == GL_QUADS)
      return length - length % 4;
   else
      return length;
}
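
/* Emit a 3DPRIMITIVE command for one primitive.  For indirect draws, the
 * draw parameters are loaded into the 3DPRIM registers from the indirect
 * buffer object instead of being written as immediates.
 */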
static void
brw_emit_prim(struct brw_context *brw,
              const struct _mesa_prim *prim,
              uint32_t hw_prim)
{
   int verts_per_instance;
   int vertex_access_type;
   int indirect_flag;

   DBG("PRIM: %s %d %d\n", _mesa_enum_to_string(prim->mode),
       prim->start, prim->count);

   int start_vertex_location = prim->start;
   int base_vertex_location = prim->basevertex;

   if (prim->indexed) {
      vertex_access_type = brw->gen >= 7 ?
         GEN7_3DPRIM_VERTEXBUFFER_ACCESS_RANDOM :
         GEN4_3DPRIM_VERTEXBUFFER_ACCESS_RANDOM;
      start_vertex_location += brw->ib.start_vertex_offset;
      base_vertex_location += brw->vb.start_vertex_bias;
   } else {
      vertex_access_type = brw->gen >= 7 ?
         GEN7_3DPRIM_VERTEXBUFFER_ACCESS_SEQUENTIAL :
         GEN4_3DPRIM_VERTEXBUFFER_ACCESS_SEQUENTIAL;
      start_vertex_location += brw->vb.start_vertex_bias;
   }

   /* We only need to trim the primitive count on pre-Gen6. */
   if (brw->gen < 6)
      verts_per_instance = trim(prim->mode, prim->count);
   else
      verts_per_instance = prim->count;

   /* If nothing to emit, just return. */
   if (verts_per_instance == 0 && !prim->is_indirect)
      return;

   /* If we're set to always flush, do it before and after the primitive emit.
    * We want to catch both missed flushes that hurt instruction/state cache
    * and missed flushes of the render cache as it heads to other parts of
    * the GPU besides the draw code.
    */
   if (brw->always_flush_cache)
      brw_emit_mi_flush(brw);

   /* If indirect, emit a bunch of loads from the indirect BO. */
   if (prim->is_indirect) {
      struct gl_buffer_object *indirect_buffer = brw->ctx.DrawIndirectBuffer;
      drm_intel_bo *bo = intel_bufferobj_buffer(brw,
            intel_buffer_object(indirect_buffer),
            prim->indirect_offset, 5 * sizeof(GLuint));

      indirect_flag = GEN7_3DPRIM_INDIRECT_PARAMETER_ENABLE;

      brw_load_register_mem(brw, GEN7_3DPRIM_VERTEX_COUNT, bo,
                            I915_GEM_DOMAIN_VERTEX, 0,
                            prim->indirect_offset + 0);
      brw_load_register_mem(brw, GEN7_3DPRIM_INSTANCE_COUNT, bo,
                            I915_GEM_DOMAIN_VERTEX, 0,
                            prim->indirect_offset + 4);

      brw_load_register_mem(brw, GEN7_3DPRIM_START_VERTEX, bo,
                            I915_GEM_DOMAIN_VERTEX, 0,
                            prim->indirect_offset + 8);
      if (prim->indexed) {
         brw_load_register_mem(brw, GEN7_3DPRIM_BASE_VERTEX, bo,
                               I915_GEM_DOMAIN_VERTEX, 0,
                               prim->indirect_offset + 12);
         brw_load_register_mem(brw, GEN7_3DPRIM_START_INSTANCE, bo,
                               I915_GEM_DOMAIN_VERTEX, 0,
                               prim->indirect_offset + 16);
      } else {
         brw_load_register_mem(brw, GEN7_3DPRIM_START_INSTANCE, bo,
                               I915_GEM_DOMAIN_VERTEX, 0,
                               prim->indirect_offset + 12);
         /* Non-indexed draws have no base vertex, so load an immediate 0. */
         BEGIN_BATCH(3);
         OUT_BATCH(MI_LOAD_REGISTER_IMM | (3 - 2));
         OUT_BATCH(GEN7_3DPRIM_BASE_VERTEX);
         OUT_BATCH(0);
         ADVANCE_BATCH();
      }
   } else {
      indirect_flag = 0;
   }

   BEGIN_BATCH(brw->gen >= 7 ? 7 : 6);

   if (brw->gen >= 7) {
      const int predicate_enable =
         (brw->predicate.state == BRW_PREDICATE_STATE_USE_BIT)
         ? GEN7_3DPRIM_PREDICATE_ENABLE : 0;

      OUT_BATCH(CMD_3D_PRIM << 16 | (7 - 2) | indirect_flag | predicate_enable);
      OUT_BATCH(hw_prim | vertex_access_type);
   } else {
      OUT_BATCH(CMD_3D_PRIM << 16 | (6 - 2) |
                hw_prim << GEN4_3DPRIM_TOPOLOGY_TYPE_SHIFT |
                vertex_access_type);
   }
   OUT_BATCH(verts_per_instance);
   OUT_BATCH(start_vertex_location);
   OUT_BATCH(prim->num_instances);
   OUT_BATCH(prim->base_instance);
   OUT_BATCH(base_vertex_location);
   ADVANCE_BATCH();

   if (brw->always_flush_cache)
      brw_emit_mi_flush(brw);
}
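
/* Merge the user's vertex arrays into the driver's vertex buffer state and,
 * pre-Haswell, compute the attribute workaround flags the VS needs for
 * vertex formats the hardware can't fetch natively.
 */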
static void
brw_merge_inputs(struct brw_context *brw,
                 const struct gl_client_array *arrays[])
{
   const struct gl_context *ctx = &brw->ctx;
   GLuint i;

   for (i = 0; i < brw->vb.nr_buffers; i++) {
      drm_intel_bo_unreference(brw->vb.buffers[i].bo);
      brw->vb.buffers[i].bo = NULL;
   }
   brw->vb.nr_buffers = 0;

   for (i = 0; i < VERT_ATTRIB_MAX; i++) {
      brw->vb.inputs[i].buffer = -1;
      brw->vb.inputs[i].glarray = arrays[i];
   }

   if (brw->gen < 8 && !brw->is_haswell) {
      struct gl_program *vp = &ctx->VertexProgram._Current->Base;
      /* Prior to Haswell, the hardware can't natively support GL_FIXED or
       * 2_10_10_10_REV vertex formats.  Set appropriate workaround flags.
       */
      for (i = 0; i < VERT_ATTRIB_MAX; i++) {
         if (!(vp->InputsRead & BITFIELD64_BIT(i)))
            continue;

         uint8_t wa_flags = 0;

         switch (brw->vb.inputs[i].glarray->Type) {

         case GL_FIXED:
            wa_flags = brw->vb.inputs[i].glarray->Size;
            break;

         case GL_INT_2_10_10_10_REV:
            wa_flags |= BRW_ATTRIB_WA_SIGN;
            /* fall through */

         case GL_UNSIGNED_INT_2_10_10_10_REV:
            if (brw->vb.inputs[i].glarray->Format == GL_BGRA)
               wa_flags |= BRW_ATTRIB_WA_BGRA;

            if (brw->vb.inputs[i].glarray->Normalized)
               wa_flags |= BRW_ATTRIB_WA_NORMALIZE;
            else if (!brw->vb.inputs[i].glarray->Integer)
               wa_flags |= BRW_ATTRIB_WA_SCALE;

            break;
         }

         if (brw->vb.attrib_wa_flags[i] != wa_flags) {
            brw->vb.attrib_wa_flags[i] = wa_flags;
            brw->ctx.NewDriverState |= BRW_NEW_VS_ATTRIB_WORKAROUNDS;
         }
      }
   }
}

/**
 * \brief Call this after drawing to mark which buffers need resolving
 *
 * If the depth buffer was written to and if it has an accompanying HiZ
 * buffer, then mark that it needs a depth resolve.
 *
 * If the color buffer is a multisample window system buffer, then
 * mark that it needs a downsample.
 *
 * Also mark any render targets which will be textured as needing a render
 * cache flush.
 */
static void
brw_postdraw_set_buffers_need_resolve(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   struct gl_framebuffer *fb = ctx->DrawBuffer;

   struct intel_renderbuffer *front_irb = NULL;
   struct intel_renderbuffer *back_irb = intel_get_renderbuffer(fb, BUFFER_BACK_LEFT);
   struct intel_renderbuffer *depth_irb = intel_get_renderbuffer(fb, BUFFER_DEPTH);
   struct intel_renderbuffer *stencil_irb = intel_get_renderbuffer(fb, BUFFER_STENCIL);
   struct gl_renderbuffer_attachment *depth_att = &fb->Attachment[BUFFER_DEPTH];

   if (_mesa_is_front_buffer_drawing(fb))
      front_irb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);

   if (front_irb)
      front_irb->need_downsample = true;
   if (back_irb)
      back_irb->need_downsample = true;
   if (depth_irb && ctx->Depth.Mask) {
      intel_renderbuffer_att_set_needs_depth_resolve(depth_att);
      brw_render_cache_set_add_bo(brw, depth_irb->mt->bo);
   }

   if (ctx->Extensions.ARB_stencil_texturing &&
       stencil_irb && ctx->Stencil._WriteEnabled) {
      brw_render_cache_set_add_bo(brw, stencil_irb->mt->bo);
   }

   for (unsigned i = 0; i < fb->_NumColorDrawBuffers; i++) {
      struct intel_renderbuffer *irb =
         intel_renderbuffer(fb->_ColorDrawBuffers[i]);

      if (irb)
         brw_render_cache_set_add_bo(brw, irb->mt->bo);
   }
}

/* May fail if out of video memory for texture or vbo upload, or on
 * fallback conditions.
 */
static void
brw_try_draw_prims(struct gl_context *ctx,
                   const struct gl_client_array *arrays[],
                   const struct _mesa_prim *prims,
                   GLuint nr_prims,
                   const struct _mesa_index_buffer *ib,
                   GLuint min_index,
                   GLuint max_index,
                   struct gl_buffer_object *indirect)
{
   struct brw_context *brw = brw_context(ctx);
   GLuint i;
   bool fail_next = false;

   if (ctx->NewState)
      _mesa_update_state(ctx);

   /* We have to validate the textures *before* checking for fallbacks;
    * otherwise, the software fallback won't be able to rely on the
    * texture state, the firstLevel and lastLevel fields won't be
    * set in the intel texture object (they'll both be 0), and the
    * software fallback will segfault if it attempts to access any
    * texture level other than level 0.
    */
   brw_validate_textures(brw);

   /* Find the highest sampler unit used by each shader program.  A bit-count
    * won't work since ARB programs use the texture unit number as the sampler
    * index.
    */
   brw->wm.base.sampler_count =
      _mesa_fls(ctx->FragmentProgram._Current->Base.SamplersUsed);
   brw->gs.base.sampler_count = ctx->GeometryProgram._Current ?
      _mesa_fls(ctx->GeometryProgram._Current->Base.SamplersUsed) : 0;
   brw->tes.base.sampler_count = ctx->TessEvalProgram._Current ?
      _mesa_fls(ctx->TessEvalProgram._Current->Base.SamplersUsed) : 0;
   brw->tcs.base.sampler_count = ctx->TessCtrlProgram._Current ?
      _mesa_fls(ctx->TessCtrlProgram._Current->Base.SamplersUsed) : 0;
   brw->vs.base.sampler_count =
      _mesa_fls(ctx->VertexProgram._Current->Base.SamplersUsed);

   intel_prepare_render(brw);

   /* This workaround has to happen outside of brw_upload_render_state()
    * because it may flush the batchbuffer for a blit, affecting the state
    * flags.
    */
   brw_workaround_depthstencil_alignment(brw, 0);

   /* Bind all inputs, derive varying and size information:
    */
   brw_merge_inputs(brw, arrays);

   brw->ib.ib = ib;
   brw->ctx.NewDriverState |= BRW_NEW_INDICES;

   brw->vb.min_index = min_index;
   brw->vb.max_index = max_index;
   brw->ctx.NewDriverState |= BRW_NEW_VERTICES;

   for (i = 0; i < nr_prims; i++) {
      int estimated_max_prim_size;
      const int sampler_state_size = 16;

      estimated_max_prim_size = 512; /* batchbuffer commands */
      estimated_max_prim_size += BRW_MAX_TEX_UNIT *
         (sampler_state_size + sizeof(struct gen5_sampler_default_color));
      estimated_max_prim_size += 1024; /* gen6 VS push constants */
      estimated_max_prim_size += 1024; /* gen6 WM push constants */
      estimated_max_prim_size += 512; /* misc. pad */

      /* Flush the batch if it's approaching full, so that we don't wrap while
       * we've got validated state that needs to be in the same batch as the
       * primitive.
       */
      intel_batchbuffer_require_space(brw, estimated_max_prim_size, RENDER_RING);
      intel_batchbuffer_save_state(brw);

      if (brw->num_instances != prims[i].num_instances ||
          brw->basevertex != prims[i].basevertex) {
         brw->num_instances = prims[i].num_instances;
         brw->basevertex = prims[i].basevertex;
         if (i > 0) { /* For i == 0 we just did this before the loop */
            brw->ctx.NewDriverState |= BRW_NEW_VERTICES;
            brw_merge_inputs(brw, arrays);
         }
      }

      /* Determine if we need to flag BRW_NEW_VERTICES for updating the
       * gl_BaseVertexARB or gl_BaseInstanceARB values. For indirect draw, we
       * always flag if the shader uses one of the values. For direct draws,
       * we only flag if the values change.
       */
      const int new_basevertex =
         prims[i].indexed ? prims[i].basevertex : prims[i].start;
      const int new_baseinstance = prims[i].base_instance;
      if (i > 0) {
         const bool uses_draw_parameters =
            brw->vs.prog_data->uses_basevertex ||
            brw->vs.prog_data->uses_baseinstance;

         if ((uses_draw_parameters && prims[i].is_indirect) ||
             (brw->vs.prog_data->uses_basevertex &&
              brw->draw.params.gl_basevertex != new_basevertex) ||
             (brw->vs.prog_data->uses_baseinstance &&
              brw->draw.params.gl_baseinstance != new_baseinstance))
            brw->ctx.NewDriverState |= BRW_NEW_VERTICES;
      }

      brw->draw.params.gl_basevertex = new_basevertex;
      brw->draw.params.gl_baseinstance = new_baseinstance;
      drm_intel_bo_unreference(brw->draw.draw_params_bo);

      if (prims[i].is_indirect) {
         /* Point draw_params_bo at the indirect buffer. */
         brw->draw.draw_params_bo =
            intel_buffer_object(ctx->DrawIndirectBuffer)->buffer;
         drm_intel_bo_reference(brw->draw.draw_params_bo);
         brw->draw.draw_params_offset =
            prims[i].indirect_offset + (prims[i].indexed ? 12 : 8);
      } else {
         /* Set draw_params_bo to NULL so brw_prepare_vertices knows it
          * has to upload gl_BaseVertex and such if they're needed.
          */
         brw->draw.draw_params_bo = NULL;
         brw->draw.draw_params_offset = 0;
      }

      /* gl_DrawID always needs its own vertex buffer since it's not part of
       * the indirect parameter buffer. If the program uses gl_DrawID we need
       * to flag BRW_NEW_VERTICES. For the first iteration, we don't have
       * valid brw->vs.prog_data, but we always flag BRW_NEW_VERTICES before
       * the loop.
       */
      brw->draw.gl_drawid = prims[i].draw_id;
      drm_intel_bo_unreference(brw->draw.draw_id_bo);
      brw->draw.draw_id_bo = NULL;
      if (i > 0 && brw->vs.prog_data->uses_drawid)
         brw->ctx.NewDriverState |= BRW_NEW_VERTICES;

      if (brw->gen < 6)
         brw_set_prim(brw, &prims[i]);
      else
         gen6_set_prim(brw, &prims[i]);

retry:
      /* Note that before the loop, brw->ctx.NewDriverState was set to != 0, and
       * that the state updated in the loop outside of this block is that in
       * *_set_prim or intel_batchbuffer_flush(), which only impacts
       * brw->ctx.NewDriverState.
       */
      if (brw->ctx.NewDriverState) {
         brw->no_batch_wrap = true;
         brw_upload_render_state(brw);
      }

      brw_emit_prim(brw, &prims[i], brw->primitive);

      brw->no_batch_wrap = false;

      if (dri_bufmgr_check_aperture_space(&brw->batch.bo, 1)) {
         if (!fail_next) {
            intel_batchbuffer_reset_to_saved(brw);
            intel_batchbuffer_flush(brw);
            fail_next = true;
            goto retry;
         } else {
            int ret = intel_batchbuffer_flush(brw);
            WARN_ONCE(ret == -ENOSPC,
                      "i965: Single primitive emit exceeded "
                      "available aperture space\n");
         }
      }

      /* Now that we know we haven't run out of aperture space, we can safely
       * reset the dirty bits.
       */
      if (brw->ctx.NewDriverState)
         brw_render_state_finished(brw);
   }

   if (brw->always_flush_batch)
      intel_batchbuffer_flush(brw);

   brw_state_cache_check_size(brw);
   brw_postdraw_set_buffers_need_resolve(brw);
}

void
brw_draw_prims(struct gl_context *ctx,
               const struct _mesa_prim *prims,
               GLuint nr_prims,
               const struct _mesa_index_buffer *ib,
               GLboolean index_bounds_valid,
               GLuint min_index,
               GLuint max_index,
               struct gl_transform_feedback_object *unused_tfb_object,
               unsigned stream,
               struct gl_buffer_object *indirect)
{
   struct brw_context *brw = brw_context(ctx);
   const struct gl_client_array **arrays = ctx->Array._DrawArrays;

   assert(unused_tfb_object == NULL);

   if (!brw_check_conditional_render(brw))
      return;

   /* Handle primitive restart if needed */
   if (brw_handle_primitive_restart(ctx, prims, nr_prims, ib, indirect)) {
      /* The draw was handled, so we can exit now */
      return;
   }

   /* Do GL_SELECT and GL_FEEDBACK rendering using swrast, even though it
    * won't support all the extensions we support.
    */
   if (ctx->RenderMode != GL_RENDER) {
      perf_debug("%s render mode not supported in hardware\n",
                 _mesa_enum_to_string(ctx->RenderMode));
      _swsetup_Wakeup(ctx);
      _tnl_wakeup(ctx);
      _tnl_draw_prims(ctx, prims, nr_prims, ib,
                      index_bounds_valid, min_index, max_index, NULL, 0, NULL);
      return;
   }

   /* If we're going to have to upload any of the user's vertex arrays, then
    * get the minimum and maximum of their index buffer so we know what range
    * to upload.
    */
   if (!index_bounds_valid && !vbo_all_varyings_in_vbos(arrays)) {
      perf_debug("Scanning index buffer to compute index buffer bounds. "
                 "Use glDrawRangeElements() to avoid this.\n");
      vbo_get_minmax_indices(ctx, prims, ib, &min_index, &max_index, nr_prims);
   }

   /* Try drawing with the hardware, but don't do anything else if we can't
    * manage it.  swrast doesn't support our featureset, so we can't fall back
    * to it.
    */
   brw_try_draw_prims(ctx, arrays, prims, nr_prims, ib, min_index, max_index,
                      indirect);
}
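
/* One-time initialization of the draw module: registers brw_draw_prims()
 * as the VBO module's draw callback and resets the vertex buffer state.
 */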
void
brw_draw_init(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   struct vbo_context *vbo = vbo_context(ctx);

   /* Register our drawing function:
    */
   vbo->draw_prims = brw_draw_prims;

   for (int i = 0; i < VERT_ATTRIB_MAX; i++)
      brw->vb.inputs[i].buffer = -1;
   brw->vb.nr_buffers = 0;
   brw->vb.nr_enabled = 0;
}
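
/* Tear down the draw module: drop the buffer object references held for
 * vertex and index data.
 */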
void
brw_draw_destroy(struct brw_context *brw)
{
   unsigned i;

   for (i = 0; i < brw->vb.nr_buffers; i++) {
      drm_intel_bo_unreference(brw->vb.buffers[i].bo);
      brw->vb.buffers[i].bo = NULL;
   }
   brw->vb.nr_buffers = 0;

   for (i = 0; i < brw->vb.nr_enabled; i++) {
      brw->vb.enabled[i]->buffer = -1;
   }
   brw->vb.nr_enabled = 0;

   drm_intel_bo_unreference(brw->ib.bo);
   brw->ib.bo = NULL;
}