/**************************************************************************
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <sys/errno.h>

#include "main/glheader.h"
#include "main/context.h"
#include "main/condrender.h"
#include "main/samplerobj.h"
#include "main/state.h"
#include "main/enums.h"
#include "main/macros.h"

#include "vbo/vbo_context.h"

#include "swrast/swrast.h"
#include "swrast_setup/swrast_setup.h"
#include "drivers/common/meta.h"

#include "brw_defines.h"
#include "brw_context.h"
#include "brw_state.h"

#include "intel_batchbuffer.h"
#include "intel_fbo.h"
#include "intel_mipmap_tree.h"
#include "intel_regions.h"

#define FILE_DEBUG_FLAG DEBUG_PRIMS

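/* Static lookup tables: the hardware topology used for each GL primitive
 * type in 3DPRIMITIVE, and the "reduced" primitive class (points, lines or
 * triangles) that the rest of the state tracking keys off.
 */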
static GLuint prim_to_hw_prim[GL_POLYGON+1] = {
   _3DPRIM_POINTLIST, _3DPRIM_LINELIST,  _3DPRIM_LINELOOP, _3DPRIM_LINESTRIP,
   _3DPRIM_TRILIST,   _3DPRIM_TRISTRIP,  _3DPRIM_TRIFAN,
   _3DPRIM_QUADLIST,  _3DPRIM_QUADSTRIP, _3DPRIM_POLYGON
};

static const GLenum reduced_prim[GL_POLYGON+1] = {
   GL_POINTS,    GL_LINES,     GL_LINES,     GL_LINES,
   GL_TRIANGLES, GL_TRIANGLES, GL_TRIANGLES,
   GL_TRIANGLES, GL_TRIANGLES, GL_TRIANGLES
};

/* When the primitive changes, set a state bit and re-validate.  This is not
 * the nicest approach; ideally every program would be immune to the active
 * primitive (i.e. cope with all possibilities), but that may not be
 * realistic.
 */
static void brw_set_prim(struct brw_context *brw,
                         const struct _mesa_prim *prim)
{
   struct gl_context *ctx = &brw->intel.ctx;
   uint32_t hw_prim = prim_to_hw_prim[prim->mode];

   DBG("PRIM: %s\n", _mesa_lookup_enum_by_nr(prim->mode));

   /* Slight optimization to avoid the GS program when not needed:
    */
   if (prim->mode == GL_QUAD_STRIP &&
       ctx->Light.ShadeModel != GL_FLAT &&
       ctx->Polygon.FrontMode == GL_FILL &&
       ctx->Polygon.BackMode == GL_FILL)
      hw_prim = _3DPRIM_TRISTRIP;

   if (prim->mode == GL_QUADS && prim->count == 4 &&
       ctx->Light.ShadeModel != GL_FLAT &&
       ctx->Polygon.FrontMode == GL_FILL &&
       ctx->Polygon.BackMode == GL_FILL) {
      hw_prim = _3DPRIM_TRIFAN;
   }

   if (hw_prim != brw->primitive) {
      brw->primitive = hw_prim;
      brw->state.dirty.brw |= BRW_NEW_PRIMITIVE;

      if (reduced_prim[prim->mode] != brw->intel.reduced_primitive) {
         brw->intel.reduced_primitive = reduced_prim[prim->mode];
         brw->state.dirty.brw |= BRW_NEW_REDUCED_PRIMITIVE;
      }
   }
}

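/* Gen6+ does not need the GS quad workarounds above.  The only special case
 * is the HiZ meta-op, which is drawn as a rectangle list.
 */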
static void gen6_set_prim(struct brw_context *brw,
                          const struct _mesa_prim *prim)
{
   uint32_t hw_prim;

   DBG("PRIM: %s\n", _mesa_lookup_enum_by_nr(prim->mode));

   if (brw->hiz.op) {
      /* A HiZ resolve meta-op is in progress; it is drawn as a RECTLIST. */
      assert(prim->mode == GL_TRIANGLES);
      hw_prim = _3DPRIM_RECTLIST;
   } else {
      hw_prim = prim_to_hw_prim[prim->mode];
   }

   if (hw_prim != brw->primitive) {
      brw->primitive = hw_prim;
      brw->state.dirty.brw |= BRW_NEW_PRIMITIVE;
   }
}

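/* The hardware consumes whole primitives only, so drop any trailing vertices
 * that cannot form a complete quad or quad-strip segment.
 */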
static GLuint trim(GLenum prim, GLuint length)
{
   if (prim == GL_QUAD_STRIP)
      return length > 3 ? (length - length % 2) : 0;
   else if (prim == GL_QUADS)
      return length - length % 4;
   else
      return length;
}

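/* Emit a single 3DPRIMITIVE packet using the Gen4-6 layout, where the
 * topology and vertex access type are encoded in the command DWord itself.
 */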
static void brw_emit_prim(struct brw_context *brw,
                          const struct _mesa_prim *prim,
                          uint32_t hw_prim)
{
   struct intel_context *intel = &brw->intel;
   int verts_per_instance;
   int vertex_access_type;
   int start_vertex_location;
   int base_vertex_location;

   DBG("PRIM: %s %d %d\n", _mesa_lookup_enum_by_nr(prim->mode),
       prim->start, prim->count);

   start_vertex_location = prim->start;
   base_vertex_location = prim->basevertex;
   if (prim->indexed) {
      vertex_access_type = GEN4_3DPRIM_VERTEXBUFFER_ACCESS_RANDOM;
      start_vertex_location += brw->ib.start_vertex_offset;
      base_vertex_location += brw->vb.start_vertex_bias;
   } else {
      vertex_access_type = GEN4_3DPRIM_VERTEXBUFFER_ACCESS_SEQUENTIAL;
      start_vertex_location += brw->vb.start_vertex_bias;
   }

   verts_per_instance = trim(prim->mode, prim->count);

   /* If nothing to emit, just return. */
   if (verts_per_instance == 0)
      return;

   /* If we're set to always flush, do it before and after the primitive emit.
    * We want to catch both missed flushes that hurt instruction/state cache
    * and missed flushes of the render cache as it heads to other parts of
    * the GPU besides the draw code.
    */
   if (intel->always_flush_cache) {
      intel_batchbuffer_emit_mi_flush(intel);
   }

   BEGIN_BATCH(6);
   OUT_BATCH(CMD_3D_PRIM << 16 | (6 - 2) |
             hw_prim << GEN4_3DPRIM_TOPOLOGY_TYPE_SHIFT |
             vertex_access_type);
   OUT_BATCH(verts_per_instance);
   OUT_BATCH(start_vertex_location);
   OUT_BATCH(1); // instance count
   OUT_BATCH(0); // start instance location
   OUT_BATCH(base_vertex_location);
   ADVANCE_BATCH();

   intel->batch.need_workaround_flush = true;

   if (intel->always_flush_cache) {
      intel_batchbuffer_emit_mi_flush(intel);
   }
}

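/* Gen7 variant of the primitive emit: the 3DPRIMITIVE packet grew to seven
 * DWords, with the topology and vertex access type moved out of the command
 * DWord and into DWord 1.
 */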
static void gen7_emit_prim(struct brw_context *brw,
                           const struct _mesa_prim *prim,
                           uint32_t hw_prim)
{
   struct intel_context *intel = &brw->intel;
   int verts_per_instance;
   int vertex_access_type;
   int start_vertex_location;
   int base_vertex_location;

   DBG("PRIM: %s %d %d\n", _mesa_lookup_enum_by_nr(prim->mode),
       prim->start, prim->count);

   start_vertex_location = prim->start;
   base_vertex_location = prim->basevertex;
   if (prim->indexed) {
      vertex_access_type = GEN7_3DPRIM_VERTEXBUFFER_ACCESS_RANDOM;
      start_vertex_location += brw->ib.start_vertex_offset;
      base_vertex_location += brw->vb.start_vertex_bias;
   } else {
      vertex_access_type = GEN7_3DPRIM_VERTEXBUFFER_ACCESS_SEQUENTIAL;
      start_vertex_location += brw->vb.start_vertex_bias;
   }

   verts_per_instance = trim(prim->mode, prim->count);

   /* If nothing to emit, just return. */
   if (verts_per_instance == 0)
      return;

   /* If we're set to always flush, do it before and after the primitive emit.
    * We want to catch both missed flushes that hurt instruction/state cache
    * and missed flushes of the render cache as it heads to other parts of
    * the GPU besides the draw code.
    */
   if (intel->always_flush_cache) {
      intel_batchbuffer_emit_mi_flush(intel);
   }

   BEGIN_BATCH(7);
   OUT_BATCH(CMD_3D_PRIM << 16 | (7 - 2));
   OUT_BATCH(hw_prim | vertex_access_type);
   OUT_BATCH(verts_per_instance);
   OUT_BATCH(start_vertex_location);
   OUT_BATCH(1); // instance count
   OUT_BATCH(0); // start instance location
   OUT_BATCH(base_vertex_location);
   ADVANCE_BATCH();

   if (intel->always_flush_cache) {
      intel_batchbuffer_emit_mi_flush(intel);
   }
}

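/* Snapshot the current gl_client_array pointers into brw->vb and record each
 * enabled attribute's size, so later state upload can detect when the input
 * layout has changed.
 */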
static void brw_merge_inputs( struct brw_context *brw,
                              const struct gl_client_array *arrays[])
{
   struct brw_vertex_info old = brw->vb.info;
   GLuint i;

   for (i = 0; i < brw->vb.nr_buffers; i++) {
      drm_intel_bo_unreference(brw->vb.buffers[i].bo);
      brw->vb.buffers[i].bo = NULL;
   }
   brw->vb.nr_buffers = 0;

   memset(&brw->vb.info, 0, sizeof(brw->vb.info));

   for (i = 0; i < VERT_ATTRIB_MAX; i++) {
      brw->vb.inputs[i].buffer = -1;
      brw->vb.inputs[i].glarray = arrays[i];
      brw->vb.inputs[i].attrib = (gl_vert_attrib) i;

      if (arrays[i]->StrideB != 0)
         brw->vb.info.sizes[i/16] |= (brw->vb.inputs[i].glarray->Size - 1) <<
            ((i%16) * 2);
   }

   /* Raise statechanges if input sizes have changed. */
   if (memcmp(brw->vb.info.sizes, old.sizes, sizeof(old.sizes)) != 0)
      brw->state.dirty.brw |= BRW_NEW_INPUT_DIMENSIONS;
}

/**
 * \brief Resolve buffers before drawing.
 *
 * Resolve the depth buffer's HiZ buffer and resolve the depth buffer of each
 * enabled depth texture.
 *
 * (In the future, this will also perform MSAA resolves).
 */
static void
brw_predraw_resolve_buffers(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->intel.ctx;
   struct intel_context *intel = &brw->intel;
   struct intel_renderbuffer *depth_irb;
   struct intel_texture_object *tex_obj;
   bool did_resolve = false;

   /* Avoid recursive HiZ op. */
   if (brw->hiz.op) {
      return;
   }

   /* Resolve the depth buffer's HiZ buffer. */
   depth_irb = intel_get_renderbuffer(ctx->DrawBuffer, BUFFER_DEPTH);
   if (depth_irb && depth_irb->mt) {
      did_resolve |= intel_renderbuffer_resolve_hiz(intel, depth_irb);
   }

   /* Resolve depth buffer of each enabled depth texture. */
   for (int i = 0; i < BRW_MAX_TEX_UNIT; i++) {
      if (!ctx->Texture.Unit[i]._ReallyEnabled)
         continue;
      tex_obj = intel_texture_object(ctx->Texture.Unit[i]._Current);
      if (!tex_obj || !tex_obj->mt)
         continue;
      did_resolve |= intel_miptree_all_slices_resolve_depth(intel, tex_obj->mt);
   }

   if (did_resolve) {
      /* Call vbo_bind_array() to synchronize the vbo module's vertex
       * attributes to the gl_context's.
       *
       * The vbo module tracks vertex attributes separately from the
       * gl_context.  Specifically, the vbo module maintains vertex attributes
       * in vbo_exec_context::array::inputs, which is synchronized with
       * gl_context::Array::ArrayObj::VertexAttrib by vbo_bind_array().
       * vbo_draw_arrays() calls vbo_bind_array() to perform the
       * synchronization before calling the real draw call,
       * vbo_context::draw_arrays.
       *
       * At this point (after performing a resolve meta-op but before calling
       * vbo_bind_array), the gl_context's vertex attributes have been
       * restored to their original state (that is, their state before the
       * meta-op began), but the vbo module's vertex attributes are those used
       * in the last meta-op.  Therefore we must manually synchronize the two
       * with vbo_bind_array() before continuing with the original draw
       * command.
       */
      _mesa_update_state(ctx);
      vbo_bind_arrays(ctx);
      _mesa_update_state(ctx);
   }
}

/**
 * \brief Call this after drawing to mark which buffers need resolving.
 *
 * If the depth buffer was written to and if it has an accompanying HiZ
 * buffer, then mark that it needs a depth resolve.
 *
 * (In the future, this will also mark needed MSAA resolves).
 */
static void brw_postdraw_set_buffers_need_resolve(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->intel.ctx;
   struct gl_framebuffer *fb = ctx->DrawBuffer;
   struct intel_renderbuffer *depth_irb =
      intel_get_renderbuffer(fb, BUFFER_DEPTH);

   if (depth_irb && ctx->Depth.Mask) {
      intel_renderbuffer_set_needs_depth_resolve(depth_irb);
   }
}

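/* Number of vertices the hardware emits per decomposed primitive.  Used
 * below to translate remaining transform feedback buffer space into a count
 * of primitives that can still be written.
 */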
static int
verts_per_prim(GLenum mode)
{
   switch (mode) {
   case GL_POINTS:
      return 1;
   case GL_LINES:
   case GL_LINE_STRIP:
   case GL_LINE_LOOP:
      return 2;
   case GL_TRIANGLES:
   case GL_TRIANGLE_STRIP:
   case GL_TRIANGLE_FAN:
   case GL_QUADS:
   case GL_QUAD_STRIP:
   case GL_POLYGON:
      return 3;
   default:
      _mesa_problem(NULL,
                    "unknown prim type in transform feedback primitive count");
      return 0;
   }
}

/**
 * Update internal counters based on the drawing operation described in
 * prim.
 */
static void
brw_update_primitive_count(struct brw_context *brw,
                           const struct _mesa_prim *prim)
{
   uint32_t count = count_tessellated_primitives(prim);
   brw->sol.primitives_generated += count;
   if (brw->intel.ctx.TransformFeedback.CurrentObject->Active &&
       !brw->intel.ctx.TransformFeedback.CurrentObject->Paused) {
      /* Update brw->sol.svbi_0_max_index to reflect the amount by which the
       * hardware is going to increment SVBI 0 when this drawing operation
       * occurs.  This is necessary because the kernel does not (yet) save and
       * restore GPU registers when context switching, so we'll need to be
       * able to reload SVBI 0 with the correct value in case we have to start
       * a new batch buffer.
       */
      unsigned verts = verts_per_prim(prim->mode);
      uint32_t space_avail =
         (brw->sol.svbi_0_max_index - brw->sol.svbi_0_starting_index) / verts;
      uint32_t primitives_written = MIN2(space_avail, count);
      brw->sol.svbi_0_starting_index += verts * primitives_written;

      /* And update the TRANSFORM_FEEDBACK_PRIMITIVES_WRITTEN query. */
      brw->sol.primitives_written += primitives_written;
   }
}

/* May fail if out of video memory for texture or vbo upload, or on
 * fallback conditions.
 */
static bool brw_try_draw_prims( struct gl_context *ctx,
                                const struct gl_client_array *arrays[],
                                const struct _mesa_prim *prim,
                                GLuint nr_prims,
                                const struct _mesa_index_buffer *ib,
                                GLuint min_index,
                                GLuint max_index )
{
   struct intel_context *intel = intel_context(ctx);
   struct brw_context *brw = brw_context(ctx);
   bool retval = true;
   GLuint i;
   bool fail_next = false;

   if (ctx->NewState)
      _mesa_update_state( ctx );

   /* We have to validate the textures *before* checking for fallbacks;
    * otherwise, the software fallback won't be able to rely on the
    * texture state, the firstLevel and lastLevel fields won't be
    * set in the intel texture object (they'll both be 0), and the
    * software fallback will segfault if it attempts to access any
    * texture level other than level 0.
    */
   brw_validate_textures( brw );

   /* Resolves must occur after updating state and finalizing textures but
    * before setting up any hardware state for this draw call.
    */
   brw_predraw_resolve_buffers(brw);

   /* Bind all inputs, derive varying and size information:
    */
   brw_merge_inputs( brw, arrays );

   brw->ib.ib = ib;
   brw->state.dirty.brw |= BRW_NEW_INDICES;

   brw->vb.min_index = min_index;
   brw->vb.max_index = max_index;
   brw->state.dirty.brw |= BRW_NEW_VERTICES;

   /* Have to validate state quite late.  Will rebuild tnl_program,
    * which depends on varying information.
    *
    * Note this is where brw->vs->prog_data.inputs_read is calculated,
    * so can't access it earlier.
    */

   intel_prepare_render(intel);

   for (i = 0; i < nr_prims; i++) {
      int estimated_max_prim_size;

      estimated_max_prim_size = 512; /* batchbuffer commands */
      estimated_max_prim_size += (BRW_MAX_TEX_UNIT *
                                  (sizeof(struct brw_sampler_state) +
                                   sizeof(struct gen5_sampler_default_color)));
      estimated_max_prim_size += 1024; /* gen6 VS push constants */
      estimated_max_prim_size += 1024; /* gen6 WM push constants */
      estimated_max_prim_size += 512; /* misc. pad */

      /* Flush the batch if it's approaching full, so that we don't wrap while
       * we've got validated state that needs to be in the same batch as the
       * primitives.
       */
      intel_batchbuffer_require_space(intel, estimated_max_prim_size, false);
      intel_batchbuffer_save_state(intel);

      if (intel->gen < 6)
         brw_set_prim(brw, &prim[i]);
      else
         gen6_set_prim(brw, &prim[i]);

retry:
      /* Note that before the loop, brw->state.dirty.brw was set to != 0, and
       * that the state updated in the loop outside of this block is that in
       * *_set_prim or intel_batchbuffer_flush(), which only impacts
       * brw->state.dirty.brw.
       */
      if (brw->state.dirty.brw) {
         intel->no_batch_wrap = true;
         brw_upload_state(brw);

         if (unlikely(brw->intel.Fallback)) {
            intel->no_batch_wrap = false;
            retval = false;
            goto out;
         }
      }

      if (intel->gen >= 7)
         gen7_emit_prim(brw, &prim[i], brw->primitive);
      else
         brw_emit_prim(brw, &prim[i], brw->primitive);

      intel->no_batch_wrap = false;

      if (dri_bufmgr_check_aperture_space(&intel->batch.bo, 1)) {
         if (!fail_next) {
            intel_batchbuffer_reset_to_saved(intel);
            intel_batchbuffer_flush(intel);
            fail_next = true;
            goto retry;
         } else {
            if (intel_batchbuffer_flush(intel) == -ENOSPC) {
               static bool warned = false;

               if (!warned) {
                  fprintf(stderr, "i965: Single primitive emit exceeded "
                          "available aperture space\n");
                  warned = true;
               }

               retval = false;
            }
         }
      }

      if (!_mesa_meta_in_progress(ctx))
         brw_update_primitive_count(brw, &prim[i]);
   }

   if (intel->always_flush_batch)
      intel_batchbuffer_flush(intel);
 out:

   brw_state_cache_check_size(brw);
   brw_postdraw_set_buffers_need_resolve(brw);

   return retval;
}

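/* Top-level draw entry point registered with the vbo module.  Computes index
 * bounds and rebases client arrays when required, then attempts the hardware
 * path via brw_try_draw_prims(); on failure the draw is handed to the
 * software tnl/swrast path.
 */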
void brw_draw_prims( struct gl_context *ctx,
                     const struct gl_client_array *arrays[],
                     const struct _mesa_prim *prim,
                     GLuint nr_prims,
                     const struct _mesa_index_buffer *ib,
                     GLboolean index_bounds_valid,
                     GLuint min_index,
                     GLuint max_index,
                     struct gl_transform_feedback_object *tfb_vertcount )
{
   bool retval;

   if (!_mesa_check_conditional_render(ctx))
      return;

   if (!vbo_all_varyings_in_vbos(arrays)) {
      if (!index_bounds_valid)
         vbo_get_minmax_indices(ctx, prim, ib, &min_index, &max_index, nr_prims);

      /* Decide if we want to rebase.  If so we end up recursing once
       * only into this function.
       */
      if (min_index != 0 && !vbo_any_varyings_in_vbos(arrays)) {
         vbo_rebase_prims(ctx, arrays,
                          prim, nr_prims,
                          ib, min_index, max_index,
                          brw_draw_prims);
         return;
      }
   }

   /* Make a first attempt at drawing:
    */
   retval = brw_try_draw_prims(ctx, arrays, prim, nr_prims, ib, min_index, max_index);

   /* Otherwise, we really are out of memory.  Pass the drawing command to the
    * software tnl module, which will in turn call swrast to do the drawing.
    */
   if (!retval) {
      _swsetup_Wakeup(ctx);
      _tnl_draw_prims(ctx, arrays, prim, nr_prims, ib, min_index, max_index);
   }
}

void brw_draw_init( struct brw_context *brw )
{
   struct gl_context *ctx = &brw->intel.ctx;
   struct vbo_context *vbo = vbo_context(ctx);
   int i;

   /* Register our drawing function:
    */
   vbo->draw_prims = brw_draw_prims;

   for (i = 0; i < VERT_ATTRIB_MAX; i++)
      brw->vb.inputs[i].buffer = -1;
   brw->vb.nr_buffers = 0;
   brw->vb.nr_enabled = 0;
}

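/* Drop the buffer object references held by the vertex and index buffer
 * state when the context is destroyed.
 */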
void brw_draw_destroy( struct brw_context *brw )
{
   int i;

   for (i = 0; i < brw->vb.nr_buffers; i++) {
      drm_intel_bo_unreference(brw->vb.buffers[i].bo);
      brw->vb.buffers[i].bo = NULL;
   }
   brw->vb.nr_buffers = 0;

   for (i = 0; i < brw->vb.nr_enabled; i++) {
      brw->vb.enabled[i]->buffer = -1;
   }
   brw->vb.nr_enabled = 0;

   drm_intel_bo_unreference(brw->ib.bo);
   brw->ib.bo = NULL;
}