/*
 * Copyright 2003 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#include <sys/errno.h>

#include "main/arrayobj.h"
#include "main/blend.h"
#include "main/context.h"
#include "main/condrender.h"
#include "main/samplerobj.h"
#include "main/state.h"
#include "main/enums.h"
#include "main/macros.h"
#include "main/transformfeedback.h"
#include "main/framebuffer.h"
#include "main/varray.h"
#include "tnl/tnl.h"
#include "vbo/vbo.h"
#include "swrast/swrast.h"
#include "swrast_setup/swrast_setup.h"
#include "drivers/common/meta.h"
#include "util/bitscan.h"
#include "util/bitset.h"

#include "brw_blorp.h"
#include "brw_draw.h"
#include "brw_defines.h"
#include "compiler/brw_eu_defines.h"
#include "brw_context.h"
#include "brw_state.h"

#include "intel_batchbuffer.h"
#include "intel_buffers.h"
#include "intel_fbo.h"
#include "intel_mipmap_tree.h"
#include "intel_buffer_objects.h"

#define FILE_DEBUG_FLAG DEBUG_PRIMS
static const GLenum reduced_prim[GL_POLYGON+1] = {
   [GL_POINTS] = GL_POINTS,
   [GL_LINES] = GL_LINES,
   [GL_LINE_LOOP] = GL_LINES,
   [GL_LINE_STRIP] = GL_LINES,
   [GL_TRIANGLES] = GL_TRIANGLES,
   [GL_TRIANGLE_STRIP] = GL_TRIANGLES,
   [GL_TRIANGLE_FAN] = GL_TRIANGLES,
   [GL_QUADS] = GL_TRIANGLES,
   [GL_QUAD_STRIP] = GL_TRIANGLES,
   [GL_POLYGON] = GL_TRIANGLES
};
/* When the primitive changes, set a state bit and re-validate.  Not
 * the nicest and would rather deal with this by having all the
 * programs be immune to the active primitive (ie. cope with all
 * possibilities).  That may not be realistic however.
 */
static void
brw_set_prim(struct brw_context *brw, const struct _mesa_prim *prim)
{
   struct gl_context *ctx = &brw->ctx;
   uint32_t hw_prim = get_hw_prim_for_gl_prim(prim->mode);

   DBG("PRIM: %s\n", _mesa_enum_to_string(prim->mode));

   /* Slight optimization to avoid the GS program when not needed:
    */
   if (prim->mode == GL_QUAD_STRIP &&
       ctx->Light.ShadeModel != GL_FLAT &&
       ctx->Polygon.FrontMode == GL_FILL &&
       ctx->Polygon.BackMode == GL_FILL)
      hw_prim = _3DPRIM_TRISTRIP;

   if (prim->mode == GL_QUADS && prim->count == 4 &&
       ctx->Light.ShadeModel != GL_FLAT &&
       ctx->Polygon.FrontMode == GL_FILL &&
       ctx->Polygon.BackMode == GL_FILL) {
      hw_prim = _3DPRIM_TRIFAN;
   }

   if (hw_prim != brw->primitive) {
      brw->primitive = hw_prim;
      brw->ctx.NewDriverState |= BRW_NEW_PRIMITIVE;

      if (reduced_prim[prim->mode] != brw->reduced_primitive) {
         brw->reduced_primitive = reduced_prim[prim->mode];
         brw->ctx.NewDriverState |= BRW_NEW_REDUCED_PRIMITIVE;
      }
   }
}
static void
gen6_set_prim(struct brw_context *brw, const struct _mesa_prim *prim)
{
   const struct gl_context *ctx = &brw->ctx;
   uint32_t hw_prim;

   DBG("PRIM: %s\n", _mesa_enum_to_string(prim->mode));

   if (prim->mode == GL_PATCHES) {
      hw_prim = _3DPRIM_PATCHLIST(ctx->TessCtrlProgram.patch_vertices);
   } else {
      hw_prim = get_hw_prim_for_gl_prim(prim->mode);
   }

   if (hw_prim != brw->primitive) {
      brw->primitive = hw_prim;
      brw->ctx.NewDriverState |= BRW_NEW_PRIMITIVE;
      if (prim->mode == GL_PATCHES)
         brw->ctx.NewDriverState |= BRW_NEW_PATCH_PRIMITIVE;
   }
}
/**
 * The hardware is capable of removing dangling vertices on its own; however,
 * prior to Gen6, we sometimes convert quads into trifans (and quad strips
 * into tristrips), since pre-Gen6 hardware requires a GS to render quads.
 * This function manually trims dangling vertices from a draw call involving
 * quads so that those dangling vertices won't get drawn when we convert to
 * trifans/tristrips.
 */
static GLuint
trim(GLenum prim, GLuint length)
{
   if (prim == GL_QUAD_STRIP)
      return length > 3 ? (length - length % 2) : 0;
   else if (prim == GL_QUADS)
      return length - length % 4;
   else
      return length;
}
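/* For example, trim(GL_QUADS, 7) yields 4, dropping the three dangling
 * vertices that cannot form a complete quad, and trim(GL_QUAD_STRIP, 3)
 * yields 0 since a quad strip needs at least four vertices.
 */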
static void
brw_emit_prim(struct brw_context *brw,
              const struct _mesa_prim *prim,
              uint32_t hw_prim,
              struct brw_transform_feedback_object *xfb_obj,
              unsigned stream)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   int verts_per_instance;
   int vertex_access_type;
   int indirect_flag;

   DBG("PRIM: %s %d %d\n", _mesa_enum_to_string(prim->mode),
       prim->start, prim->count);

   int start_vertex_location = prim->start;
   int base_vertex_location = prim->basevertex;

   if (prim->indexed) {
      vertex_access_type = devinfo->gen >= 7 ?
         GEN7_3DPRIM_VERTEXBUFFER_ACCESS_RANDOM :
         GEN4_3DPRIM_VERTEXBUFFER_ACCESS_RANDOM;
      start_vertex_location += brw->ib.start_vertex_offset;
      base_vertex_location += brw->vb.start_vertex_bias;
   } else {
      vertex_access_type = devinfo->gen >= 7 ?
         GEN7_3DPRIM_VERTEXBUFFER_ACCESS_SEQUENTIAL :
         GEN4_3DPRIM_VERTEXBUFFER_ACCESS_SEQUENTIAL;
      start_vertex_location += brw->vb.start_vertex_bias;
   }

   /* We only need to trim the primitive count on pre-Gen6. */
   if (devinfo->gen < 6)
      verts_per_instance = trim(prim->mode, prim->count);
   else
      verts_per_instance = prim->count;

   /* If nothing to emit, just return. */
   if (verts_per_instance == 0 && !prim->is_indirect && !xfb_obj)
      return;

   /* If we're set to always flush, do it before and after the primitive emit.
    * We want to catch both missed flushes that hurt instruction/state cache
    * and missed flushes of the render cache as it heads to other parts of
    * the GPU besides the draw code.
    */
   if (brw->always_flush_cache)
      brw_emit_mi_flush(brw);

   /* If indirect, emit a bunch of loads from the indirect BO. */
   if (xfb_obj) {
      indirect_flag = GEN7_3DPRIM_INDIRECT_PARAMETER_ENABLE;

      brw_load_register_mem(brw, GEN7_3DPRIM_VERTEX_COUNT,
                            xfb_obj->prim_count_bo,
                            stream * sizeof(uint32_t));
      BEGIN_BATCH(9);
      OUT_BATCH(MI_LOAD_REGISTER_IMM | (9 - 2));
      OUT_BATCH(GEN7_3DPRIM_INSTANCE_COUNT);
      OUT_BATCH(prim->num_instances);
      OUT_BATCH(GEN7_3DPRIM_START_VERTEX);
      OUT_BATCH(0);
      OUT_BATCH(GEN7_3DPRIM_BASE_VERTEX);
      OUT_BATCH(0);
      OUT_BATCH(GEN7_3DPRIM_START_INSTANCE);
      OUT_BATCH(0);
      ADVANCE_BATCH();
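      /* The register loads above implement glDrawTransformFeedback(): the
       * vertex count the hardware recorded in xfb_obj->prim_count_bo during
       * the transform feedback pass feeds 3DPRIM_VERTEX_COUNT directly, so
       * no CPU readback of the primitive count is needed.
       */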
   } else if (prim->is_indirect) {
      struct gl_buffer_object *indirect_buffer = brw->ctx.DrawIndirectBuffer;
      struct brw_bo *bo = intel_bufferobj_buffer(brw,
                            intel_buffer_object(indirect_buffer),
                            prim->indirect_offset, 5 * sizeof(GLuint), false);

      indirect_flag = GEN7_3DPRIM_INDIRECT_PARAMETER_ENABLE;

      brw_load_register_mem(brw, GEN7_3DPRIM_VERTEX_COUNT, bo,
                            prim->indirect_offset + 0);
      brw_load_register_mem(brw, GEN7_3DPRIM_INSTANCE_COUNT, bo,
                            prim->indirect_offset + 4);

      brw_load_register_mem(brw, GEN7_3DPRIM_START_VERTEX, bo,
                            prim->indirect_offset + 8);
      if (prim->indexed) {
         brw_load_register_mem(brw, GEN7_3DPRIM_BASE_VERTEX, bo,
                               prim->indirect_offset + 12);
         brw_load_register_mem(brw, GEN7_3DPRIM_START_INSTANCE, bo,
                               prim->indirect_offset + 16);
      } else {
         brw_load_register_mem(brw, GEN7_3DPRIM_START_INSTANCE, bo,
                               prim->indirect_offset + 12);
         brw_load_register_imm32(brw, GEN7_3DPRIM_BASE_VERTEX, 0);
      }
   } else {
      indirect_flag = 0;
   }
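   /* The offsets above follow the core GL indirect command layouts:
    * DrawElementsIndirectCommand is { count, instanceCount, firstIndex,
    * baseVertex, baseInstance } and DrawArraysIndirectCommand is
    * { count, instanceCount, first, baseInstance }, each field a packed
    * GLuint -- hence the 4-byte strides and the 5 * sizeof(GLuint) bound
    * passed to intel_bufferobj_buffer() above.
    */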
   BEGIN_BATCH(devinfo->gen >= 7 ? 7 : 6);

   if (devinfo->gen >= 7) {
      const int predicate_enable =
         (brw->predicate.state == BRW_PREDICATE_STATE_USE_BIT)
         ? GEN7_3DPRIM_PREDICATE_ENABLE : 0;

      OUT_BATCH(CMD_3D_PRIM << 16 | (7 - 2) | indirect_flag | predicate_enable);
      OUT_BATCH(hw_prim | vertex_access_type);
   } else {
      OUT_BATCH(CMD_3D_PRIM << 16 | (6 - 2) |
                hw_prim << GEN4_3DPRIM_TOPOLOGY_TYPE_SHIFT |
                vertex_access_type);
   }
   OUT_BATCH(verts_per_instance);
   OUT_BATCH(start_vertex_location);
   OUT_BATCH(prim->num_instances);
   OUT_BATCH(prim->base_instance);
   OUT_BATCH(base_vertex_location);
   ADVANCE_BATCH();

   if (brw->always_flush_cache)
      brw_emit_mi_flush(brw);
}
static void
brw_merge_inputs(struct brw_context *brw)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   const struct gl_context *ctx = &brw->ctx;
   GLuint i;

   for (i = 0; i < brw->vb.nr_buffers; i++) {
      brw_bo_unreference(brw->vb.buffers[i].bo);
      brw->vb.buffers[i].bo = NULL;
   }
   brw->vb.nr_buffers = 0;

   for (i = 0; i < VERT_ATTRIB_MAX; i++) {
      struct brw_vertex_element *input = &brw->vb.inputs[i];
      input->buffer = -1;
      _mesa_draw_attrib_and_binding(ctx, i,
                                    &input->glattrib, &input->glbinding);
   }

   if (devinfo->gen < 8 && !devinfo->is_haswell) {
      uint64_t mask = ctx->VertexProgram._Current->info.inputs_read;
      /* Prior to Haswell, the hardware can't natively support GL_FIXED or
       * 2_10_10_10_REV vertex formats.  Set appropriate workaround flags.
       */
      while (mask) {
         const struct gl_array_attributes *glattrib;
         uint8_t wa_flags = 0;

         i = u_bit_scan64(&mask);
         glattrib = brw->vb.inputs[i].glattrib;

         switch (glattrib->Type) {

         case GL_FIXED:
            wa_flags = glattrib->Size;
            break;

         case GL_INT_2_10_10_10_REV:
            wa_flags |= BRW_ATTRIB_WA_SIGN;
            /* fallthrough */

         case GL_UNSIGNED_INT_2_10_10_10_REV:
            if (glattrib->Format == GL_BGRA)
               wa_flags |= BRW_ATTRIB_WA_BGRA;

            if (glattrib->Normalized)
               wa_flags |= BRW_ATTRIB_WA_NORMALIZE;
            else if (!glattrib->Integer)
               wa_flags |= BRW_ATTRIB_WA_SCALE;

            break;
         }
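         /* For example, a normalized GL_UNSIGNED_INT_2_10_10_10_REV BGRA
          * attribute ends up with wa_flags ==
          * (BRW_ATTRIB_WA_BGRA | BRW_ATTRIB_WA_NORMALIZE); the flags feed
          * the VS compile key so the shader itself can swizzle and
          * renormalize the raw data.
          */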
         if (brw->vb.attrib_wa_flags[i] != wa_flags) {
            brw->vb.attrib_wa_flags[i] = wa_flags;
            brw->ctx.NewDriverState |= BRW_NEW_VS_ATTRIB_WORKAROUNDS;
         }
      }
   }
}
/* Disable auxiliary buffers if a renderbuffer is also bound as a texture
 * or shader image.  This causes a self-dependency, where both rendering
 * and sampling may concurrently read or write the CCS buffer, causing
 * incorrect pixels.
 */
static bool
intel_disable_rb_aux_buffer(struct brw_context *brw,
                            bool *draw_aux_buffer_disabled,
                            struct intel_mipmap_tree *tex_mt,
                            unsigned min_level, unsigned num_levels,
                            const char *usage)
{
   const struct gl_framebuffer *fb = brw->ctx.DrawBuffer;
   bool found = false;

   /* We only need to worry about color compression and fast clears. */
   if (tex_mt->aux_usage != ISL_AUX_USAGE_CCS_D &&
       tex_mt->aux_usage != ISL_AUX_USAGE_CCS_E)
      return false;

   for (unsigned i = 0; i < fb->_NumColorDrawBuffers; i++) {
      const struct intel_renderbuffer *irb =
         intel_renderbuffer(fb->_ColorDrawBuffers[i]);

      if (irb && irb->mt->bo == tex_mt->bo &&
          irb->mt_level >= min_level &&
          irb->mt_level < min_level + num_levels) {
         found = draw_aux_buffer_disabled[i] = true;
      }
   }

   if (found) {
      perf_debug("Disabling CCS because a renderbuffer is also bound %s.\n",
                 usage);
   }

   return found;
}
static void
mark_textures_used_for_txf(BITSET_WORD *used_for_txf,
                           const struct gl_program *prog)
{
   if (!prog)
      return;

   unsigned mask = prog->SamplersUsed & prog->info.textures_used_by_txf;
   while (mask) {
      int s = u_bit_scan(&mask);
      BITSET_SET(used_for_txf, prog->SamplerUnits[s]);
   }
}
/**
 * \brief Resolve buffers before drawing.
 *
 * Resolve the depth buffer's HiZ buffer, resolve the depth buffer of each
 * enabled depth texture, and flush the render cache for any dirty textures.
 */
void
brw_predraw_resolve_inputs(struct brw_context *brw, bool rendering,
                           bool *draw_aux_buffer_disabled)
{
   struct gl_context *ctx = &brw->ctx;
   struct intel_texture_object *tex_obj;

   BITSET_DECLARE(used_for_txf, MAX_COMBINED_TEXTURE_IMAGE_UNITS);
   memset(used_for_txf, 0, sizeof(used_for_txf));
   if (rendering) {
      mark_textures_used_for_txf(used_for_txf, ctx->VertexProgram._Current);
      mark_textures_used_for_txf(used_for_txf, ctx->TessCtrlProgram._Current);
      mark_textures_used_for_txf(used_for_txf, ctx->TessEvalProgram._Current);
      mark_textures_used_for_txf(used_for_txf, ctx->GeometryProgram._Current);
      mark_textures_used_for_txf(used_for_txf, ctx->FragmentProgram._Current);
   } else {
      mark_textures_used_for_txf(used_for_txf, ctx->ComputeProgram._Current);
   }

   /* Resolve depth buffer and render cache of each enabled texture. */
   int maxEnabledUnit = ctx->Texture._MaxEnabledTexImageUnit;
   for (int i = 0; i <= maxEnabledUnit; i++) {
      if (!ctx->Texture.Unit[i]._Current)
         continue;

      tex_obj = intel_texture_object(ctx->Texture.Unit[i]._Current);
      if (!tex_obj || !tex_obj->mt)
         continue;

      struct gl_sampler_object *sampler = _mesa_get_samplerobj(ctx, i);
      enum isl_format view_format =
         translate_tex_format(brw, tex_obj->_Format, sampler->sRGBDecode);

      unsigned min_level, min_layer, num_levels, num_layers;
      if (tex_obj->base.Immutable) {
         min_level  = tex_obj->base.MinLevel;
         num_levels = MIN2(tex_obj->base.NumLevels, tex_obj->_MaxLevel + 1);
         min_layer  = tex_obj->base.MinLayer;
         num_layers = tex_obj->base.Target != GL_TEXTURE_3D ?
                      tex_obj->base.NumLayers : INTEL_REMAINING_LAYERS;
      } else {
         min_level  = tex_obj->base.BaseLevel;
         num_levels = tex_obj->_MaxLevel - tex_obj->base.BaseLevel + 1;
         min_layer  = 0;
         num_layers = INTEL_REMAINING_LAYERS;
      }

      if (rendering) {
         intel_disable_rb_aux_buffer(brw, draw_aux_buffer_disabled,
                                     tex_obj->mt, min_level, num_levels,
                                     "for sampling");
      }

      intel_miptree_prepare_texture(brw, tex_obj->mt, view_format,
                                    min_level, num_levels,
                                    min_layer, num_layers);

      /* If any programs are using it with texelFetch, we may need to also do
       * a prepare with an sRGB format to ensure texelFetch works "properly".
       */
      if (BITSET_TEST(used_for_txf, i)) {
         enum isl_format txf_format =
            translate_tex_format(brw, tex_obj->_Format, GL_DECODE_EXT);
         if (txf_format != view_format) {
            intel_miptree_prepare_texture(brw, tex_obj->mt, txf_format,
                                          min_level, num_levels,
                                          min_layer, num_layers);
         }
      }

      brw_cache_flush_for_read(brw, tex_obj->mt->bo);

      if (tex_obj->base.StencilSampling ||
          tex_obj->mt->format == MESA_FORMAT_S_UINT8) {
         intel_update_r8stencil(brw, tex_obj->mt);
      }
   }

   /* Resolve color for each active shader image. */
   for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
      const struct gl_program *prog = ctx->_Shader->CurrentProgram[i];

      if (unlikely(prog && prog->info.num_images)) {
         for (unsigned j = 0; j < prog->info.num_images; j++) {
            struct gl_image_unit *u =
               &ctx->ImageUnits[prog->sh.ImageUnits[j]];
            tex_obj = intel_texture_object(u->TexObj);

            if (tex_obj && tex_obj->mt) {
               if (rendering) {
                  intel_disable_rb_aux_buffer(brw, draw_aux_buffer_disabled,
                                              tex_obj->mt, 0, ~0,
                                              "as a shader image");
               }

               intel_miptree_prepare_image(brw, tex_obj->mt);

               brw_cache_flush_for_read(brw, tex_obj->mt->bo);
            }
         }
      }
   }
}
static void
brw_predraw_resolve_framebuffer(struct brw_context *brw,
                                bool *draw_aux_buffer_disabled)
{
   struct gl_context *ctx = &brw->ctx;
   struct intel_renderbuffer *depth_irb;

   /* Resolve the depth buffer's HiZ buffer. */
   depth_irb = intel_get_renderbuffer(ctx->DrawBuffer, BUFFER_DEPTH);
   if (depth_irb && depth_irb->mt) {
      intel_miptree_prepare_depth(brw, depth_irb->mt,
                                  depth_irb->mt_level,
                                  depth_irb->mt_layer,
                                  depth_irb->layer_count);
   }

   /* Resolve color buffers for non-coherent framebuffer fetch. */
   if (!ctx->Extensions.EXT_shader_framebuffer_fetch &&
       ctx->FragmentProgram._Current &&
       ctx->FragmentProgram._Current->info.outputs_read) {
      const struct gl_framebuffer *fb = ctx->DrawBuffer;

      /* This is only used for non-coherent framebuffer fetch, so we don't
       * need to worry about CCS_E and can simply pass 'false' below.
       */
      assert(brw->screen->devinfo.gen < 9);

      for (unsigned i = 0; i < fb->_NumColorDrawBuffers; i++) {
         const struct intel_renderbuffer *irb =
            intel_renderbuffer(fb->_ColorDrawBuffers[i]);

         if (irb) {
            intel_miptree_prepare_texture(brw, irb->mt, irb->mt->surf.format,
                                          irb->mt_level, 1,
                                          irb->mt_layer, irb->layer_count);
         }
      }
   }

   struct gl_framebuffer *fb = ctx->DrawBuffer;
   for (int i = 0; i < fb->_NumColorDrawBuffers; i++) {
      struct intel_renderbuffer *irb =
         intel_renderbuffer(fb->_ColorDrawBuffers[i]);

      if (irb == NULL || irb->mt == NULL)
         continue;

      mesa_format mesa_format =
         _mesa_get_render_format(ctx, intel_rb_format(irb));
      enum isl_format isl_format = brw_isl_format_for_mesa_format(mesa_format);
      bool blend_enabled = ctx->Color.BlendEnabled & (1 << i);
      enum isl_aux_usage aux_usage =
         intel_miptree_render_aux_usage(brw, irb->mt, isl_format,
                                        blend_enabled,
                                        draw_aux_buffer_disabled[i]);
      if (brw->draw_aux_usage[i] != aux_usage) {
         brw->ctx.NewDriverState |= BRW_NEW_AUX_STATE;
         brw->draw_aux_usage[i] = aux_usage;
      }

      intel_miptree_prepare_render(brw, irb->mt, irb->mt_level,
                                   irb->mt_layer, irb->layer_count,
                                   aux_usage);

      brw_cache_flush_for_render(brw, irb->mt->bo,
                                 isl_format, aux_usage);
   }
}
/**
 * \brief Call this after drawing to mark which buffers need resolving
 *
 * If the depth buffer was written to and if it has an accompanying HiZ
 * buffer, then mark that it needs a depth resolve.
 *
 * If the stencil buffer was written to then mark that it may need to be
 * copied to an R8 texture.
 *
 * If the color buffer is a multisample window system buffer, then
 * mark that it needs a downsample.
 *
 * Also mark any render targets which will be textured as needing a render
 * cache flush.
 */
static void
brw_postdraw_set_buffers_need_resolve(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   struct gl_framebuffer *fb = ctx->DrawBuffer;

   struct intel_renderbuffer *front_irb = NULL;
   struct intel_renderbuffer *back_irb = intel_get_renderbuffer(fb, BUFFER_BACK_LEFT);
   struct intel_renderbuffer *depth_irb = intel_get_renderbuffer(fb, BUFFER_DEPTH);
   struct intel_renderbuffer *stencil_irb = intel_get_renderbuffer(fb, BUFFER_STENCIL);
   struct gl_renderbuffer_attachment *depth_att = &fb->Attachment[BUFFER_DEPTH];

   if (_mesa_is_front_buffer_drawing(fb))
      front_irb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);

   if (front_irb)
      front_irb->need_downsample = true;
   if (back_irb)
      back_irb->need_downsample = true;
   if (depth_irb) {
      bool depth_written = brw_depth_writes_enabled(brw);
      if (depth_att->Layered) {
         intel_miptree_finish_depth(brw, depth_irb->mt,
                                    depth_irb->mt_level,
                                    depth_irb->mt_layer,
                                    depth_irb->layer_count,
                                    depth_written);
      } else {
         intel_miptree_finish_depth(brw, depth_irb->mt,
                                    depth_irb->mt_level,
                                    depth_irb->mt_layer, 1,
                                    depth_written);
      }
      if (depth_written)
         brw_depth_cache_add_bo(brw, depth_irb->mt->bo);
   }

   if (stencil_irb && brw->stencil_write_enabled) {
      struct intel_mipmap_tree *stencil_mt =
         stencil_irb->mt->stencil_mt != NULL ?
         stencil_irb->mt->stencil_mt : stencil_irb->mt;
      brw_depth_cache_add_bo(brw, stencil_mt->bo);
      intel_miptree_finish_write(brw, stencil_mt, stencil_irb->mt_level,
                                 stencil_irb->mt_layer,
                                 stencil_irb->layer_count, ISL_AUX_USAGE_NONE);
   }

   for (unsigned i = 0; i < fb->_NumColorDrawBuffers; i++) {
      struct intel_renderbuffer *irb =
         intel_renderbuffer(fb->_ColorDrawBuffers[i]);

      if (!irb)
         continue;

      mesa_format mesa_format =
         _mesa_get_render_format(ctx, intel_rb_format(irb));
      enum isl_format isl_format = brw_isl_format_for_mesa_format(mesa_format);
      enum isl_aux_usage aux_usage = brw->draw_aux_usage[i];

      brw_render_cache_add_bo(brw, irb->mt->bo, isl_format, aux_usage);

      intel_miptree_finish_render(brw, irb->mt, irb->mt_level,
                                  irb->mt_layer, irb->layer_count,
                                  aux_usage);
   }
}
static void
intel_renderbuffer_move_temp_back(struct brw_context *brw,
                                  struct intel_renderbuffer *irb)
{
   if (irb->align_wa_mt == NULL)
      return;

   brw_cache_flush_for_read(brw, irb->align_wa_mt->bo);

   intel_miptree_copy_slice(brw, irb->align_wa_mt, 0, 0,
                            irb->mt,
                            irb->Base.Base.TexImage->Level, irb->mt_layer);

   intel_miptree_reference(&irb->align_wa_mt, NULL);

   /* Finally restore the x,y to correspond to full miptree. */
   intel_renderbuffer_set_draw_offset(irb);

   /* Make sure render surface state gets re-emitted with updated miptree. */
   brw->NewGLState |= _NEW_BUFFERS;
}
static void
brw_postdraw_reconcile_align_wa_slices(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   struct gl_framebuffer *fb = ctx->DrawBuffer;

   struct intel_renderbuffer *depth_irb =
      intel_get_renderbuffer(fb, BUFFER_DEPTH);
   struct intel_renderbuffer *stencil_irb =
      intel_get_renderbuffer(fb, BUFFER_STENCIL);

   if (depth_irb && depth_irb->align_wa_mt)
      intel_renderbuffer_move_temp_back(brw, depth_irb);

   if (stencil_irb && stencil_irb->align_wa_mt)
      intel_renderbuffer_move_temp_back(brw, stencil_irb);

   for (unsigned i = 0; i < fb->_NumColorDrawBuffers; i++) {
      struct intel_renderbuffer *irb =
         intel_renderbuffer(fb->_ColorDrawBuffers[i]);

      if (!irb || irb->align_wa_mt == NULL)
         continue;

      intel_renderbuffer_move_temp_back(brw, irb);
   }
}
static void
brw_prepare_drawing(struct gl_context *ctx,
                    const struct _mesa_index_buffer *ib,
                    bool index_bounds_valid,
                    GLuint min_index,
                    GLuint max_index)
{
   struct brw_context *brw = brw_context(ctx);

   if (ctx->NewState)
      _mesa_update_state(ctx);

   /* We have to validate the textures *before* checking for fallbacks;
    * otherwise, the software fallback won't be able to rely on the
    * texture state, the firstLevel and lastLevel fields won't be
    * set in the intel texture object (they'll both be 0), and the
    * software fallback will segfault if it attempts to access any
    * texture level other than level 0.
    */
   brw_validate_textures(brw);

   /* Find the highest sampler unit used by each shader program.  A bit-count
    * won't work since ARB programs use the texture unit number as the sampler
    * index.
    */
   brw->wm.base.sampler_count =
      util_last_bit(ctx->FragmentProgram._Current->SamplersUsed);
   brw->gs.base.sampler_count = ctx->GeometryProgram._Current ?
      util_last_bit(ctx->GeometryProgram._Current->SamplersUsed) : 0;
   brw->tes.base.sampler_count = ctx->TessEvalProgram._Current ?
      util_last_bit(ctx->TessEvalProgram._Current->SamplersUsed) : 0;
   brw->tcs.base.sampler_count = ctx->TessCtrlProgram._Current ?
      util_last_bit(ctx->TessCtrlProgram._Current->SamplersUsed) : 0;
   brw->vs.base.sampler_count =
      util_last_bit(ctx->VertexProgram._Current->SamplersUsed);
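   /* util_last_bit() returns one past the index of the highest set bit,
    * so a SamplersUsed mask of 0b10100 yields a sampler_count of 5 even
    * though only two samplers are live -- exactly what unit-indexed ARB
    * programs require.
    */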
   intel_prepare_render(brw);

   /* This workaround has to happen outside of brw_upload_render_state()
    * because it may flush the batchbuffer for a blit, affecting the state
    * flags.
    */
   brw_workaround_depthstencil_alignment(brw, 0);

   /* Resolves must occur after updating renderbuffers, updating context state,
    * and finalizing textures but before setting up any hardware state for
    * this draw call.
    */
   bool draw_aux_buffer_disabled[MAX_DRAW_BUFFERS] = { };
   brw_predraw_resolve_inputs(brw, true, draw_aux_buffer_disabled);
   brw_predraw_resolve_framebuffer(brw, draw_aux_buffer_disabled);

   /* Bind all inputs, derive varying and size information:
    */
   brw_merge_inputs(brw);

   brw->ib.ib = ib;
   brw->ctx.NewDriverState |= BRW_NEW_INDICES;

   brw->vb.index_bounds_valid = index_bounds_valid;
   brw->vb.min_index = min_index;
   brw->vb.max_index = max_index;
   brw->ctx.NewDriverState |= BRW_NEW_VERTICES;
}
static void
brw_finish_drawing(struct gl_context *ctx)
{
   struct brw_context *brw = brw_context(ctx);

   if (brw->always_flush_batch)
      intel_batchbuffer_flush(brw);

   brw_program_cache_check_size(brw);
   brw_postdraw_reconcile_align_wa_slices(brw);
   brw_postdraw_set_buffers_need_resolve(brw);

   if (brw->draw.draw_params_count_bo) {
      brw_bo_unreference(brw->draw.draw_params_count_bo);
      brw->draw.draw_params_count_bo = NULL;
   }
}
/* May fail if out of video memory for texture or vbo upload, or on
 * fallback conditions.
 */
static void
brw_draw_single_prim(struct gl_context *ctx,
                     const struct _mesa_prim *prim,
                     unsigned prim_id,
                     struct brw_transform_feedback_object *xfb_obj,
                     unsigned stream,
                     struct gl_buffer_object *indirect)
{
   struct brw_context *brw = brw_context(ctx);
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   bool fail_next = false;

   /* Flag BRW_NEW_DRAW_CALL on every draw.  This allows us to have
    * atoms that happen on every draw call.
    */
   brw->ctx.NewDriverState |= BRW_NEW_DRAW_CALL;

   /* Flush the batch if the batch/state buffers are nearly full.  We can
    * grow them if needed, but this is not free, so we'd like to avoid it.
    */
   intel_batchbuffer_require_space(brw, 1500);
   brw_require_statebuffer_space(brw, 2400);
   intel_batchbuffer_save_state(brw);

   if (brw->num_instances != prim->num_instances ||
       brw->basevertex != prim->basevertex ||
       brw->baseinstance != prim->base_instance) {
      brw->num_instances = prim->num_instances;
      brw->basevertex = prim->basevertex;
      brw->baseinstance = prim->base_instance;
      if (prim_id > 0) { /* For i == 0 we just did this before the loop */
         brw->ctx.NewDriverState |= BRW_NEW_VERTICES;
         brw_merge_inputs(brw);
      }
   }

   /* Determine if we need to flag BRW_NEW_VERTICES for updating the
    * gl_BaseVertexARB or gl_BaseInstanceARB values.  For indirect draw, we
    * always flag if the shader uses one of the values.  For direct draws,
    * we only flag if the values change.
    */
   const int new_firstvertex =
      prim->indexed ? prim->basevertex : prim->start;
   const int new_baseinstance = prim->base_instance;
   const struct brw_vs_prog_data *vs_prog_data =
      brw_vs_prog_data(brw->vs.base.prog_data);
   if (prim_id > 0) {
      const bool uses_draw_parameters =
         vs_prog_data->uses_firstvertex ||
         vs_prog_data->uses_baseinstance;

      if ((uses_draw_parameters && prim->is_indirect) ||
          (vs_prog_data->uses_firstvertex &&
           brw->draw.params.firstvertex != new_firstvertex) ||
          (vs_prog_data->uses_baseinstance &&
           brw->draw.params.gl_baseinstance != new_baseinstance))
         brw->ctx.NewDriverState |= BRW_NEW_VERTICES;
   }

   brw->draw.params.firstvertex = new_firstvertex;
   brw->draw.params.gl_baseinstance = new_baseinstance;
   brw_bo_unreference(brw->draw.draw_params_bo);

   if (prim->is_indirect) {
      /* Point draw_params_bo at the indirect buffer. */
      brw->draw.draw_params_bo =
         intel_buffer_object(ctx->DrawIndirectBuffer)->buffer;
      brw_bo_reference(brw->draw.draw_params_bo);
      brw->draw.draw_params_offset =
         prim->indirect_offset + (prim->indexed ? 12 : 8);
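      /* Offsets 12 and 8 mirror the indirect command layouts: baseVertex
       * lives at byte 12 of a DrawElementsIndirectCommand, while 'first'
       * lives at byte 8 of a DrawArraysIndirectCommand -- the field that
       * backs the shader's firstvertex/gl_BaseVertex value.
       */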
   } else {
      /* Set draw_params_bo to NULL so brw_prepare_vertices knows it
       * has to upload gl_BaseVertex and such if they're needed.
       */
      brw->draw.draw_params_bo = NULL;
      brw->draw.draw_params_offset = 0;
   }

   /* gl_DrawID always needs its own vertex buffer since it's not part of
    * the indirect parameter buffer.  Same for is_indexed_draw, which shares
    * the buffer with gl_DrawID.  If the program uses gl_DrawID, we need to
    * flag BRW_NEW_VERTICES.  For the first iteration, we don't have valid
    * vs_prog_data, but we always flag BRW_NEW_VERTICES before the loop.
    */
   if (prim_id > 0 && vs_prog_data->uses_drawid)
      brw->ctx.NewDriverState |= BRW_NEW_VERTICES;

   brw->draw.derived_params.gl_drawid = prim->draw_id;
   brw->draw.derived_params.is_indexed_draw = prim->indexed ? ~0 : 0;

   brw_bo_unreference(brw->draw.derived_draw_params_bo);
   brw->draw.derived_draw_params_bo = NULL;
   brw->draw.derived_draw_params_offset = 0;

   if (devinfo->gen < 6)
      brw_set_prim(brw, prim);
   else
      gen6_set_prim(brw, prim);
retry:

   /* Note that before the loop, brw->ctx.NewDriverState was set to != 0, and
    * that the state updated in the loop outside of this block is that in
    * *_set_prim or intel_batchbuffer_flush(), which only impacts
    * brw->ctx.NewDriverState.
    */
   if (brw->ctx.NewDriverState) {
      brw->batch.no_wrap = true;
      brw_upload_render_state(brw);
   }

   brw_emit_prim(brw, prim, brw->primitive, xfb_obj, stream);

   brw->batch.no_wrap = false;

   if (!brw_batch_has_aperture_space(brw, 0)) {
      if (!fail_next) {
         intel_batchbuffer_reset_to_saved(brw);
         intel_batchbuffer_flush(brw);
         fail_next = true;
         goto retry;
      } else {
         int ret = intel_batchbuffer_flush(brw);
         WARN_ONCE(ret == -ENOSPC,
                   "i965: Single primitive emit exceeded "
                   "available aperture space\n");
      }
   }

   /* Now that we know we haven't run out of aperture space, we can safely
    * reset the dirty bits.
    */
   if (brw->ctx.NewDriverState)
      brw_render_state_finished(brw);
}
void
brw_draw_prims(struct gl_context *ctx,
               const struct _mesa_prim *prims,
               GLuint nr_prims,
               const struct _mesa_index_buffer *ib,
               GLboolean index_bounds_valid,
               GLuint min_index,
               GLuint max_index,
               struct gl_transform_feedback_object *gl_xfb_obj,
               unsigned stream,
               struct gl_buffer_object *indirect)
{
   unsigned i;
   struct brw_context *brw = brw_context(ctx);
   int predicate_state = brw->predicate.state;
   struct brw_transform_feedback_object *xfb_obj =
      (struct brw_transform_feedback_object *) gl_xfb_obj;

   if (!brw_check_conditional_render(brw))
      return;

   /* Handle primitive restart if needed */
   if (brw_handle_primitive_restart(ctx, prims, nr_prims, ib, indirect)) {
      /* The draw was handled, so we can exit now */
      return;
   }

   /* Do GL_SELECT and GL_FEEDBACK rendering using swrast, even though it
    * won't support all the extensions we support.
    */
   if (ctx->RenderMode != GL_RENDER) {
      perf_debug("%s render mode not supported in hardware\n",
                 _mesa_enum_to_string(ctx->RenderMode));
      _swsetup_Wakeup(ctx);
      _tnl_wakeup(ctx);
      _tnl_draw(ctx, prims, nr_prims, ib,
                index_bounds_valid, min_index, max_index, NULL, 0, NULL);
      return;
   }

   /* If we're going to have to upload any of the user's vertex arrays, then
    * get the minimum and maximum of their index buffer so we know what range
    * to upload.
    */
   if (!index_bounds_valid && _mesa_draw_user_array_bits(ctx) != 0) {
      perf_debug("Scanning index buffer to compute index buffer bounds.  "
                 "Use glDrawRangeElements() to avoid this.\n");
      vbo_get_minmax_indices(ctx, prims, ib, &min_index, &max_index, nr_prims);
      index_bounds_valid = true;
   }

   brw_prepare_drawing(ctx, ib, index_bounds_valid, min_index, max_index);
   /* Try drawing with the hardware, but don't do anything else if we can't
    * manage it.  swrast doesn't support our featureset, so we can't fall back
    * to it.
    */
   for (i = 0; i < nr_prims; i++) {
      /* Implementation of ARB_indirect_parameters via predicates */
      if (brw->draw.draw_params_count_bo) {
         brw_emit_pipe_control_flush(brw, PIPE_CONTROL_FLUSH_ENABLE);

         /* Upload the current draw count from the draw parameters buffer to
          * MI_PREDICATE_SRC0.
          */
         brw_load_register_mem(brw, MI_PREDICATE_SRC0,
                               brw->draw.draw_params_count_bo,
                               brw->draw.draw_params_count_offset);
         /* Zero the top 32-bits of MI_PREDICATE_SRC0 */
         brw_load_register_imm32(brw, MI_PREDICATE_SRC0 + 4, 0);
         /* Upload the id of the current primitive to MI_PREDICATE_SRC1. */
         brw_load_register_imm64(brw, MI_PREDICATE_SRC1, prims[i].draw_id);

         BEGIN_BATCH(1);
         if (i == 0 && brw->predicate.state != BRW_PREDICATE_STATE_USE_BIT) {
            OUT_BATCH(GEN7_MI_PREDICATE | MI_PREDICATE_LOADOP_LOADINV |
                      MI_PREDICATE_COMBINEOP_SET |
                      MI_PREDICATE_COMPAREOP_SRCS_EQUAL);
         } else {
            OUT_BATCH(GEN7_MI_PREDICATE |
                      MI_PREDICATE_LOADOP_LOAD | MI_PREDICATE_COMBINEOP_XOR |
                      MI_PREDICATE_COMPAREOP_SRCS_EQUAL);
         }
         ADVANCE_BATCH();

         brw->predicate.state = BRW_PREDICATE_STATE_USE_BIT;
      }
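      /* A sketch of the predicate math, assuming MI_PREDICATE compares
       * SRC0 (the GPU-written draw count) against SRC1 (this draw's id):
       * the first packet (LOADINV | SET) enables rendering while
       * draw_id != count; later packets (LOAD | XOR) flip the predicate
       * off once draw_id reaches the count, so trailing sub-draws are
       * discarded on the GPU.
       */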
      brw_draw_single_prim(ctx, &prims[i], i, xfb_obj, stream, indirect);
   }

   brw_finish_drawing(ctx);
   brw->predicate.state = predicate_state;
}
void
brw_draw_indirect_prims(struct gl_context *ctx,
                        GLuint mode,
                        struct gl_buffer_object *indirect_data,
                        GLsizeiptr indirect_offset,
                        unsigned draw_count,
                        unsigned stride,
                        struct gl_buffer_object *indirect_params,
                        GLsizeiptr indirect_params_offset,
                        const struct _mesa_index_buffer *ib)
{
   struct brw_context *brw = brw_context(ctx);
   struct _mesa_prim *prim;
   GLsizei i;

   prim = calloc(draw_count, sizeof(*prim));
   if (prim == NULL) {
      _mesa_error(ctx, GL_OUT_OF_MEMORY, "gl%sDraw%sIndirect%s",
                  (draw_count > 1) ? "Multi" : "",
                  ib ? "Elements" : "Arrays",
                  indirect_params ? "CountARB" : "");
      return;
   }
   prim[0].begin = 1;
   prim[draw_count - 1].end = 1;
   for (i = 0; i < draw_count; ++i, indirect_offset += stride) {
      prim[i].mode = mode;
      prim[i].indexed = ib != NULL;
      prim[i].indirect_offset = indirect_offset;
      prim[i].is_indirect = 1;
      prim[i].draw_id = i;
   }
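   /* Each sub-draw carries its own draw_id: it backs gl_DrawIDARB for
    * ARB_shader_draw_parameters and is compared against the GPU-side draw
    * count by the predicate setup in brw_draw_prims().
    */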
   if (indirect_params) {
      brw->draw.draw_params_count_bo =
         intel_buffer_object(indirect_params)->buffer;
      brw_bo_reference(brw->draw.draw_params_count_bo);
      brw->draw.draw_params_count_offset = indirect_params_offset;
   }

   brw_draw_prims(ctx, prim, draw_count,
                  ib, false, 0, ~0,
                  NULL, 0,
                  indirect_data);

   free(prim);
}
void
brw_init_draw_functions(struct dd_function_table *functions)
{
   /* Register our drawing function:
    */
   functions->Draw = brw_draw_prims;
   functions->DrawIndirect = brw_draw_indirect_prims;
}

void
brw_draw_init(struct brw_context *brw)
{
   for (int i = 0; i < VERT_ATTRIB_MAX; i++)
      brw->vb.inputs[i].buffer = -1;
   brw->vb.nr_buffers = 0;
   brw->vb.nr_enabled = 0;
}
void
brw_draw_destroy(struct brw_context *brw)
{
   unsigned i;

   for (i = 0; i < brw->vb.nr_buffers; i++) {
      brw_bo_unreference(brw->vb.buffers[i].bo);
      brw->vb.buffers[i].bo = NULL;
   }
   brw->vb.nr_buffers = 0;

   for (i = 0; i < brw->vb.nr_enabled; i++) {
      brw->vb.enabled[i]->buffer = -1;
   }
   brw->vb.nr_enabled = 0;

   brw_bo_unreference(brw->ib.bo);
   brw->ib.bo = NULL;
}