/**************************************************************************
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <sys/errno.h>

#include "main/glheader.h"
#include "main/context.h"
#include "main/condrender.h"
#include "main/samplerobj.h"
#include "main/state.h"
#include "main/enums.h"
#include "main/macros.h"

#include "vbo/vbo_context.h"

#include "swrast/swrast.h"
#include "swrast_setup/swrast_setup.h"
#include "drivers/common/meta.h"

#include "brw_draw.h"
#include "brw_defines.h"
#include "brw_context.h"
#include "brw_state.h"

#include "intel_batchbuffer.h"
#include "intel_fbo.h"
#include "intel_mipmap_tree.h"
#include "intel_regions.h"

#define FILE_DEBUG_FLAG DEBUG_PRIMS

static GLuint prim_to_hw_prim[GL_POLYGON+1] = {
   _3DPRIM_POINTLIST,
   _3DPRIM_LINELIST,
   _3DPRIM_LINELOOP,
   _3DPRIM_LINESTRIP,
   _3DPRIM_TRILIST,
   _3DPRIM_TRISTRIP,
   _3DPRIM_TRIFAN,
   _3DPRIM_QUADLIST,
   _3DPRIM_QUADSTRIP,
   _3DPRIM_POLYGON
};

static const GLenum reduced_prim[GL_POLYGON+1] = {
   GL_POINTS,
   GL_LINES,
   GL_LINES,
   GL_LINES,
   GL_TRIANGLES,
   GL_TRIANGLES,
   GL_TRIANGLES,
   GL_TRIANGLES,
   GL_TRIANGLES,
   GL_TRIANGLES
};
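
/* Both tables above are indexed by the GL primitive mode, which is a small
 * contiguous enum (GL_POINTS == 0 through GL_POLYGON == 9), so a lookup
 * such as prim_to_hw_prim[GL_QUADS] maps a Mesa primitive straight to its
 * hardware topology type.
 */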

/* When the primitive changes, set a state bit and re-validate.  This is
 * not the nicest approach; we would rather make all the programs immune
 * to the active primitive (i.e. able to cope with all possibilities),
 * but that may not be realistic.
 */
static void brw_set_prim(struct brw_context *brw,
                         const struct _mesa_prim *prim)
{
   struct gl_context *ctx = &brw->intel.ctx;
   uint32_t hw_prim = prim_to_hw_prim[prim->mode];

   DBG("PRIM: %s\n", _mesa_lookup_enum_by_nr(prim->mode));

   /* Slight optimization to avoid the GS program when not needed:
    */
   if (prim->mode == GL_QUAD_STRIP &&
       ctx->Light.ShadeModel != GL_FLAT &&
       ctx->Polygon.FrontMode == GL_FILL &&
       ctx->Polygon.BackMode == GL_FILL)
      hw_prim = _3DPRIM_TRISTRIP;

   if (prim->mode == GL_QUADS && prim->count == 4 &&
       ctx->Light.ShadeModel != GL_FLAT &&
       ctx->Polygon.FrontMode == GL_FILL &&
       ctx->Polygon.BackMode == GL_FILL) {
      hw_prim = _3DPRIM_TRIFAN;
   }

   if (hw_prim != brw->primitive) {
      brw->primitive = hw_prim;
      brw->state.dirty.brw |= BRW_NEW_PRIMITIVE;

      if (reduced_prim[prim->mode] != brw->intel.reduced_primitive) {
         brw->intel.reduced_primitive = reduced_prim[prim->mode];
         brw->state.dirty.brw |= BRW_NEW_REDUCED_PRIMITIVE;
      }
   }
}

static void gen6_set_prim(struct brw_context *brw,
                          const struct _mesa_prim *prim)
{
   uint32_t hw_prim;

   DBG("PRIM: %s\n", _mesa_lookup_enum_by_nr(prim->mode));

   hw_prim = prim_to_hw_prim[prim->mode];

   if (hw_prim != brw->primitive) {
      brw->primitive = hw_prim;
      brw->state.dirty.brw |= BRW_NEW_PRIMITIVE;
   }
}

static GLuint trim(GLenum prim, GLuint length)
{
   if (prim == GL_QUAD_STRIP)
      return length > 3 ? (length - length % 2) : 0;
   else if (prim == GL_QUADS)
      return length - length % 4;
   else
      return length;
}
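
/* For example: trim(GL_QUAD_STRIP, 7) == 6 (an odd trailing vertex cannot
 * extend the strip), trim(GL_QUAD_STRIP, 3) == 0 (too few vertices for
 * even one quad), and trim(GL_QUADS, 10) == 8.
 */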

static void brw_emit_prim(struct brw_context *brw,
                          const struct _mesa_prim *prim,
                          uint32_t hw_prim)
{
   struct intel_context *intel = &brw->intel;
   int verts_per_instance;
   int vertex_access_type;
   int start_vertex_location;
   int base_vertex_location;

   DBG("PRIM: %s %d %d\n", _mesa_lookup_enum_by_nr(prim->mode),
       prim->start, prim->count);

   start_vertex_location = prim->start;
   base_vertex_location = prim->basevertex;
   if (prim->indexed) {
      vertex_access_type = GEN4_3DPRIM_VERTEXBUFFER_ACCESS_RANDOM;
      start_vertex_location += brw->ib.start_vertex_offset;
      base_vertex_location += brw->vb.start_vertex_bias;
   } else {
      vertex_access_type = GEN4_3DPRIM_VERTEXBUFFER_ACCESS_SEQUENTIAL;
      start_vertex_location += brw->vb.start_vertex_bias;
   }

   verts_per_instance = trim(prim->mode, prim->count);

   /* If nothing to emit, just return. */
   if (verts_per_instance == 0)
      return;

   /* If we're set to always flush, do it before and after the primitive emit.
    * We want to catch both missed flushes that hurt instruction/state cache
    * and missed flushes of the render cache as it heads to other parts of
    * the GPU besides the draw code.
    */
   if (intel->always_flush_cache) {
      intel_batchbuffer_emit_mi_flush(intel);
   }

   BEGIN_BATCH(6);
   OUT_BATCH(CMD_3D_PRIM << 16 | (6 - 2) |
             hw_prim << GEN4_3DPRIM_TOPOLOGY_TYPE_SHIFT |
             vertex_access_type);
   OUT_BATCH(verts_per_instance);
   OUT_BATCH(start_vertex_location);
   OUT_BATCH(prim->num_instances);
   OUT_BATCH(0); // start instance location
   OUT_BATCH(base_vertex_location);
   ADVANCE_BATCH();

   intel->batch.need_workaround_flush = true;

   if (intel->always_flush_cache) {
      intel_batchbuffer_emit_mi_flush(intel);
   }
}
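
/* Gen7 uses the same logic but a 7-dword 3DPRIMITIVE packet: the topology
 * type and vertex access mode move out of the command header into the
 * second dword.
 */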
static void gen7_emit_prim(struct brw_context *brw,
                           const struct _mesa_prim *prim,
                           uint32_t hw_prim)
{
   struct intel_context *intel = &brw->intel;
   int verts_per_instance;
   int vertex_access_type;
   int start_vertex_location;
   int base_vertex_location;

   DBG("PRIM: %s %d %d\n", _mesa_lookup_enum_by_nr(prim->mode),
       prim->start, prim->count);

   start_vertex_location = prim->start;
   base_vertex_location = prim->basevertex;
   if (prim->indexed) {
      vertex_access_type = GEN7_3DPRIM_VERTEXBUFFER_ACCESS_RANDOM;
      start_vertex_location += brw->ib.start_vertex_offset;
      base_vertex_location += brw->vb.start_vertex_bias;
   } else {
      vertex_access_type = GEN7_3DPRIM_VERTEXBUFFER_ACCESS_SEQUENTIAL;
      start_vertex_location += brw->vb.start_vertex_bias;
   }

   verts_per_instance = trim(prim->mode, prim->count);

   /* If nothing to emit, just return. */
   if (verts_per_instance == 0)
      return;

   /* If we're set to always flush, do it before and after the primitive emit.
    * We want to catch both missed flushes that hurt instruction/state cache
    * and missed flushes of the render cache as it heads to other parts of
    * the GPU besides the draw code.
    */
   if (intel->always_flush_cache) {
      intel_batchbuffer_emit_mi_flush(intel);
   }

   BEGIN_BATCH(7);
   OUT_BATCH(CMD_3D_PRIM << 16 | (7 - 2));
   OUT_BATCH(hw_prim | vertex_access_type);
   OUT_BATCH(verts_per_instance);
   OUT_BATCH(start_vertex_location);
   OUT_BATCH(prim->num_instances);
   OUT_BATCH(0); // start instance location
   OUT_BATCH(base_vertex_location);
   ADVANCE_BATCH();

   if (intel->always_flush_cache) {
      intel_batchbuffer_emit_mi_flush(intel);
   }
}

static void brw_merge_inputs( struct brw_context *brw,
                              const struct gl_client_array *arrays[])
{
   struct brw_vertex_info old = brw->vb.info;
   GLuint i;

   for (i = 0; i < brw->vb.nr_buffers; i++) {
      drm_intel_bo_unreference(brw->vb.buffers[i].bo);
      brw->vb.buffers[i].bo = NULL;
   }
   brw->vb.nr_buffers = 0;

   memset(&brw->vb.info, 0, sizeof(brw->vb.info));

   for (i = 0; i < VERT_ATTRIB_MAX; i++) {
      brw->vb.inputs[i].buffer = -1;
      brw->vb.inputs[i].glarray = arrays[i];
      brw->vb.inputs[i].attrib = (gl_vert_attrib) i;

      /* Pack (Size - 1) into two bits per attribute, 16 attributes per
       * uint32_t, so any input size change shows up in the memcmp below.
       */
      if (arrays[i]->StrideB != 0)
         brw->vb.info.sizes[i/16] |= (brw->vb.inputs[i].glarray->Size - 1) <<
            ((i%16) * 2);
   }

   /* Raise statechanges if input sizes have changed. */
   if (memcmp(brw->vb.info.sizes, old.sizes, sizeof(old.sizes)) != 0)
      brw->state.dirty.brw |= BRW_NEW_INPUT_DIMENSIONS;
}

/**
 * \brief Resolve buffers before drawing.
 *
 * Resolve the depth buffer's HiZ buffer and resolve the depth buffer of each
 * enabled depth texture.
 *
 * (In the future, this will also perform MSAA resolves.)
 */
static void
brw_predraw_resolve_buffers(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->intel.ctx;
   struct intel_context *intel = &brw->intel;
   struct intel_renderbuffer *depth_irb;
   struct intel_texture_object *tex_obj;

   /* Resolve the depth buffer's HiZ buffer. */
   depth_irb = intel_get_renderbuffer(ctx->DrawBuffer, BUFFER_DEPTH);
   if (depth_irb && depth_irb->mt) {
      intel_renderbuffer_resolve_hiz(intel, depth_irb);
   }

   /* Resolve depth buffer of each enabled depth texture. */
   for (int i = 0; i < BRW_MAX_TEX_UNIT; i++) {
      if (!ctx->Texture.Unit[i]._ReallyEnabled)
         continue;
      tex_obj = intel_texture_object(ctx->Texture.Unit[i]._Current);
      if (!tex_obj || !tex_obj->mt)
         continue;
      intel_miptree_all_slices_resolve_depth(intel, tex_obj->mt);
   }
}

/**
 * \brief Call this after drawing to mark which buffers need resolving.
 *
 * If the depth buffer was written to and if it has an accompanying HiZ
 * buffer, then mark that it needs a depth resolve.
 *
 * (In the future, this will also mark needed MSAA resolves.)
 */
static void brw_postdraw_set_buffers_need_resolve(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->intel.ctx;
   struct gl_framebuffer *fb = ctx->DrawBuffer;
   struct intel_renderbuffer *depth_irb =
      intel_get_renderbuffer(fb, BUFFER_DEPTH);

   if (depth_irb && ctx->Depth.Mask) {
      intel_renderbuffer_set_needs_depth_resolve(depth_irb);
   }
}

static int
verts_per_prim(GLenum mode)
{
   switch (mode) {
   case GL_POINTS:
      return 1;
   case GL_LINE_STRIP:
   case GL_LINE_LOOP:
   case GL_LINES:
      return 2;
   case GL_TRIANGLE_STRIP:
   case GL_TRIANGLE_FAN:
   case GL_POLYGON:
   case GL_TRIANGLES:
   case GL_QUADS:
   case GL_QUAD_STRIP:
      /* Quads, quad strips, and polygons reach the transform feedback
       * counters as tessellated triangles, so they also count as 3
       * vertices per primitive.
       */
      return 3;
   default:
      _mesa_problem(NULL,
                    "unknown prim type in transform feedback primitive count");
      return 0;
   }
}
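
/* For example, a GL_TRIANGLE_STRIP of 7 vertices is counted as 5
 * triangles, each consuming verts_per_prim(GL_TRIANGLE_STRIP) == 3
 * vertices of transform feedback buffer space.
 */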

/**
 * Update internal counters based on the drawing operation described in
 * prim.
 */
static void
brw_update_primitive_count(struct brw_context *brw,
                           const struct _mesa_prim *prim)
{
   uint32_t count = count_tessellated_primitives(prim);
   brw->sol.primitives_generated += count;
   if (brw->intel.ctx.TransformFeedback.CurrentObject->Active &&
       !brw->intel.ctx.TransformFeedback.CurrentObject->Paused) {
      /* Update brw->sol.svbi_0_starting_index to reflect the amount by which
       * the hardware is going to increment SVBI 0 when this drawing operation
       * occurs.  This is necessary because the kernel does not (yet) save and
       * restore GPU registers when context switching, so we'll need to be
       * able to reload SVBI 0 with the correct value in case we have to start
       * a new batch buffer.
       */
      unsigned verts = verts_per_prim(prim->mode);
      uint32_t space_avail =
         (brw->sol.svbi_0_max_index - brw->sol.svbi_0_starting_index) / verts;
      uint32_t primitives_written = MIN2(space_avail, count);
      brw->sol.svbi_0_starting_index += verts * primitives_written;

      /* And update the TRANSFORM_FEEDBACK_PRIMITIVES_WRITTEN query. */
      brw->sol.primitives_written += primitives_written;
   }
}
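
/* Worked example for the SVBI arithmetic above: with 100 vertex slots left
 * (svbi_0_max_index - svbi_0_starting_index == 100) and verts == 3,
 * space_avail is 33 primitives; a draw generating 40 triangles records
 * primitives_written == 33 and advances svbi_0_starting_index by 99.
 */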

/* May fail if out of video memory for texture or vbo upload, or on
 * fallback conditions.
 */
static bool brw_try_draw_prims( struct gl_context *ctx,
                                const struct gl_client_array *arrays[],
                                const struct _mesa_prim *prim,
                                GLuint nr_prims,
                                const struct _mesa_index_buffer *ib,
                                GLuint min_index,
                                GLuint max_index )
{
   struct intel_context *intel = intel_context(ctx);
   struct brw_context *brw = brw_context(ctx);
   bool retval = true;
   GLuint i;
   bool fail_next = false;

   if (ctx->NewState)
      _mesa_update_state( ctx );

   /* We have to validate the textures *before* checking for fallbacks;
    * otherwise, the software fallback won't be able to rely on the
    * texture state, the firstLevel and lastLevel fields won't be
    * set in the intel texture object (they'll both be 0), and the
    * software fallback will segfault if it attempts to access any
    * texture level other than level 0.
    */
   brw_validate_textures( brw );

   /* Resolves must occur after updating state and finalizing textures but
    * before setting up any hardware state for this draw call.
    */
   brw_predraw_resolve_buffers(brw);

   /* Bind all inputs, derive varying and size information:
    */
   brw_merge_inputs( brw, arrays );

   brw->ib.ib = ib;
   brw->state.dirty.brw |= BRW_NEW_INDICES;

   brw->vb.min_index = min_index;
   brw->vb.max_index = max_index;
   brw->state.dirty.brw |= BRW_NEW_VERTICES;

   /* Have to validate state quite late.  Will rebuild tnl_program,
    * which depends on varying information.
    *
    * Note this is where brw->vs->prog_data.inputs_read is calculated,
    * so can't access it earlier.
    */
   intel_prepare_render(intel);

   for (i = 0; i < nr_prims; i++) {
      int estimated_max_prim_size;

      estimated_max_prim_size = 512; /* batchbuffer commands */
      estimated_max_prim_size += (BRW_MAX_TEX_UNIT *
                                  (sizeof(struct brw_sampler_state) +
                                   sizeof(struct gen5_sampler_default_color)));
      estimated_max_prim_size += 1024; /* gen6 VS push constants */
      estimated_max_prim_size += 1024; /* gen6 WM push constants */
      estimated_max_prim_size += 512; /* misc. pad */

      /* Flush the batch if it's approaching full, so that we don't wrap while
       * we've got validated state that needs to be in the same batch as the
       * primitives.
       */
      intel_batchbuffer_require_space(intel, estimated_max_prim_size, false);
      intel_batchbuffer_save_state(intel);

      if (intel->gen < 6)
         brw_set_prim(brw, &prim[i]);
      else
         gen6_set_prim(brw, &prim[i]);

retry:
      /* Note that before the loop, brw->state.dirty.brw was set to != 0, and
       * that the state updated in the loop outside of this block is that in
       * *_set_prim or intel_batchbuffer_flush(), which only impacts
       * brw->state.dirty.brw.
       */
      if (brw->state.dirty.brw) {
         intel->no_batch_wrap = true;
         brw_upload_state(brw);

         if (unlikely(brw->intel.Fallback)) {
            intel->no_batch_wrap = false;
            retval = false;
            goto out;
         }
      }

      if (intel->gen >= 7)
         gen7_emit_prim(brw, &prim[i], brw->primitive);
      else
         brw_emit_prim(brw, &prim[i], brw->primitive);

      intel->no_batch_wrap = false;

      if (dri_bufmgr_check_aperture_space(&intel->batch.bo, 1)) {
         if (!fail_next) {
            intel_batchbuffer_reset_to_saved(intel);
            intel_batchbuffer_flush(intel);
            fail_next = true;
            goto retry;
         } else {
            if (intel_batchbuffer_flush(intel) == -ENOSPC) {
               static bool warned = false;

               if (!warned) {
                  fprintf(stderr, "i965: Single primitive emit exceeded "
                          "available aperture space\n");
                  warned = true;
               }

               retval = false;
            }
         }
      }

      if (!_mesa_meta_in_progress(ctx))
         brw_update_primitive_count(brw, &prim[i]);
   }

   if (intel->always_flush_batch)
      intel_batchbuffer_flush(intel);
 out:
   brw_state_cache_check_size(brw);
   brw_postdraw_set_buffers_need_resolve(brw);

   return retval;
}
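
/* Note the retry flow above: the first aperture-space failure rewinds the
 * batch to the point saved by intel_batchbuffer_save_state(), flushes, and
 * re-emits the primitive into a fresh batch; only a second failure falls
 * through to the ENOSPC warning.
 */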

void brw_draw_prims( struct gl_context *ctx,
                     const struct gl_client_array *arrays[],
                     const struct _mesa_prim *prim,
                     GLuint nr_prims,
                     const struct _mesa_index_buffer *ib,
                     GLboolean index_bounds_valid,
                     GLuint min_index,
                     GLuint max_index,
                     struct gl_transform_feedback_object *tfb_vertcount )
{
   bool retval;

   if (!_mesa_check_conditional_render(ctx))
      return;

   if (!vbo_all_varyings_in_vbos(arrays)) {
      if (!index_bounds_valid)
         vbo_get_minmax_indices(ctx, prim, ib, &min_index, &max_index, nr_prims);

      /* Decide if we want to rebase.  If so we end up recursing once
       * only into this function.
       */
      if (min_index != 0 && !vbo_any_varyings_in_vbos(arrays)) {
         vbo_rebase_prims(ctx, arrays,
                          prim, nr_prims,
                          ib, min_index, max_index,
                          brw_draw_prims );
         return;
      }
   }

   /* Make a first attempt at drawing:
    */
   retval = brw_try_draw_prims(ctx, arrays, prim, nr_prims, ib, min_index, max_index);

   /* Otherwise, we really are out of memory.  Pass the drawing command to
    * the software tnl module, which will in turn call swrast to do the
    * drawing.
    */
   if (!retval) {
      _swsetup_Wakeup(ctx);
      _tnl_wakeup(ctx);
      _tnl_draw_prims(ctx, arrays, prim, nr_prims, ib, min_index, max_index);
   }
}

void brw_draw_init( struct brw_context *brw )
{
   struct gl_context *ctx = &brw->intel.ctx;
   struct vbo_context *vbo = vbo_context(ctx);
   int i;

   /* Register our drawing function:
    */
   vbo->draw_prims = brw_draw_prims;

   for (i = 0; i < VERT_ATTRIB_MAX; i++)
      brw->vb.inputs[i].buffer = -1;
   brw->vb.nr_buffers = 0;
   brw->vb.nr_enabled = 0;
}

void brw_draw_destroy( struct brw_context *brw )
{
   int i;

   for (i = 0; i < brw->vb.nr_buffers; i++) {
      drm_intel_bo_unreference(brw->vb.buffers[i].bo);
      brw->vb.buffers[i].bo = NULL;
   }
   brw->vb.nr_buffers = 0;

   for (i = 0; i < brw->vb.nr_enabled; i++) {
      brw->vb.enabled[i]->buffer = -1;
   }
   brw->vb.nr_enabled = 0;

   drm_intel_bo_unreference(brw->ib.bo);
   brw->ib.bo = NULL;
}