/*
 * Copyright 2003 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <sys/errno.h>

#include "main/arrayobj.h"
#include "main/blend.h"
#include "main/context.h"
#include "main/condrender.h"
#include "main/samplerobj.h"
#include "main/state.h"
#include "main/enums.h"
#include "main/macros.h"
#include "main/transformfeedback.h"
#include "main/framebuffer.h"
#include "main/varray.h"
#include "tnl/tnl.h"
#include "vbo/vbo.h"
#include "swrast/swrast.h"
#include "swrast_setup/swrast_setup.h"
#include "drivers/common/meta.h"
#include "util/bitscan.h"
#include "util/bitset.h"

#include "brw_blorp.h"
#include "brw_draw.h"
#include "brw_defines.h"
#include "compiler/brw_eu_defines.h"
#include "brw_context.h"
#include "brw_state.h"

#include "intel_batchbuffer.h"
#include "intel_buffers.h"
#include "intel_fbo.h"
#include "intel_mipmap_tree.h"
#include "intel_buffer_objects.h"

#define FILE_DEBUG_FLAG DEBUG_PRIMS
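
/* Map each GL primitive type to the coarse "reduced" class (points, lines,
 * or triangles) tracked by brw_set_prim() below; BRW_NEW_REDUCED_PRIMITIVE
 * is only flagged when this coarse classification changes.
 */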
static const GLenum reduced_prim[GL_POLYGON + 1] = {
   [GL_POINTS] = GL_POINTS,
   [GL_LINES] = GL_LINES,
   [GL_LINE_LOOP] = GL_LINES,
   [GL_LINE_STRIP] = GL_LINES,
   [GL_TRIANGLES] = GL_TRIANGLES,
   [GL_TRIANGLE_STRIP] = GL_TRIANGLES,
   [GL_TRIANGLE_FAN] = GL_TRIANGLES,
   [GL_QUADS] = GL_TRIANGLES,
   [GL_QUAD_STRIP] = GL_TRIANGLES,
   [GL_POLYGON] = GL_TRIANGLES
};

/* When the primitive changes, set a state bit and re-validate.  Not the
 * nicest solution; we would rather deal with this by having all the
 * programs be immune to the active primitive (i.e. cope with all
 * possibilities).  That may not be realistic however.
 */
static void
brw_set_prim(struct brw_context *brw, const struct _mesa_prim *prim)
{
   struct gl_context *ctx = &brw->ctx;
   uint32_t hw_prim = get_hw_prim_for_gl_prim(prim->mode);

   DBG("PRIM: %s\n", _mesa_enum_to_string(prim->mode));

   /* Slight optimization to avoid the GS program when not needed:
    */
   if (prim->mode == GL_QUAD_STRIP &&
       ctx->Light.ShadeModel != GL_FLAT &&
       ctx->Polygon.FrontMode == GL_FILL &&
       ctx->Polygon.BackMode == GL_FILL)
      hw_prim = _3DPRIM_TRISTRIP;

   if (prim->mode == GL_QUADS && prim->count == 4 &&
       ctx->Light.ShadeModel != GL_FLAT &&
       ctx->Polygon.FrontMode == GL_FILL &&
       ctx->Polygon.BackMode == GL_FILL) {
      hw_prim = _3DPRIM_TRIFAN;
   }

   if (hw_prim != brw->primitive) {
      brw->primitive = hw_prim;
      brw->ctx.NewDriverState |= BRW_NEW_PRIMITIVE;

      if (reduced_prim[prim->mode] != brw->reduced_primitive) {
         brw->reduced_primitive = reduced_prim[prim->mode];
         brw->ctx.NewDriverState |= BRW_NEW_REDUCED_PRIMITIVE;
      }
   }
}

static void
gen6_set_prim(struct brw_context *brw, const struct _mesa_prim *prim)
{
   const struct gl_context *ctx = &brw->ctx;
   uint32_t hw_prim;

   DBG("PRIM: %s\n", _mesa_enum_to_string(prim->mode));

   if (prim->mode == GL_PATCHES) {
      hw_prim = _3DPRIM_PATCHLIST(ctx->TessCtrlProgram.patch_vertices);
   } else {
      hw_prim = get_hw_prim_for_gl_prim(prim->mode);
   }

   if (hw_prim != brw->primitive) {
      brw->primitive = hw_prim;
      brw->ctx.NewDriverState |= BRW_NEW_PRIMITIVE;
      if (prim->mode == GL_PATCHES)
         brw->ctx.NewDriverState |= BRW_NEW_PATCH_PRIMITIVE;
   }
}

/**
 * The hardware is capable of removing dangling vertices on its own; however,
 * prior to Gen6, we sometimes convert quads into trifans (and quad strips
 * into tristrips), since pre-Gen6 hardware requires a GS to render quads.
 * This function manually trims dangling vertices from a draw call involving
 * quads so that those dangling vertices won't get drawn when we convert to
 * trifans/tristrips.
 */
static GLuint
trim(GLenum prim, GLuint length)
{
   if (prim == GL_QUAD_STRIP)
      return length > 3 ? (length - length % 2) : 0;
   else if (prim == GL_QUADS)
      return length - length % 4;
   else
      return length;
}
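
/* Worked examples of the math above: trim(GL_QUADS, 7) == 4, dropping the
 * three dangling vertices of an incomplete quad, and
 * trim(GL_QUAD_STRIP, 5) == 4, dropping the one vertex that cannot form a
 * new quad in the strip.
 */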

static void
brw_emit_prim(struct brw_context *brw,
              const struct _mesa_prim *prim,
              uint32_t hw_prim,
              bool is_indexed,
              GLuint num_instances, GLuint base_instance,
              struct brw_transform_feedback_object *xfb_obj,
              unsigned stream,
              bool is_indirect,
              GLsizeiptr indirect_offset)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   int verts_per_instance;
   int vertex_access_type;
   int indirect_flag;

   DBG("PRIM: %s %d %d\n", _mesa_enum_to_string(prim->mode),
       prim->start, prim->count);

   int start_vertex_location = prim->start;
   int base_vertex_location = prim->basevertex;

   if (is_indexed) {
      vertex_access_type = devinfo->gen >= 7 ?
         GEN7_3DPRIM_VERTEXBUFFER_ACCESS_RANDOM :
         GEN4_3DPRIM_VERTEXBUFFER_ACCESS_RANDOM;
      start_vertex_location += brw->ib.start_vertex_offset;
      base_vertex_location += brw->vb.start_vertex_bias;
   } else {
      vertex_access_type = devinfo->gen >= 7 ?
         GEN7_3DPRIM_VERTEXBUFFER_ACCESS_SEQUENTIAL :
         GEN4_3DPRIM_VERTEXBUFFER_ACCESS_SEQUENTIAL;
      start_vertex_location += brw->vb.start_vertex_bias;
   }

   /* We only need to trim the primitive count on pre-Gen6. */
   if (devinfo->gen < 6)
      verts_per_instance = trim(prim->mode, prim->count);
   else
      verts_per_instance = prim->count;

   /* If nothing to emit, just return. */
   if (verts_per_instance == 0 && !is_indirect && !xfb_obj)
      return;

   /* If we're set to always flush, do it before and after the primitive emit.
    * We want to catch both missed flushes that hurt the instruction/state
    * cache and missed flushes of the render cache as it heads to other parts
    * of the GPU besides the draw code.
    */
   if (brw->always_flush_cache)
      brw_emit_mi_flush(brw);
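
   /* For reference when reading the register loads below: ARB_draw_indirect
    * defines DrawArraysIndirectCommand as { count, primCount, first,
    * baseInstance } and DrawElementsIndirectCommand as { count, primCount,
    * firstIndex, baseVertex, baseInstance }, all 32-bit words.  That is why
    * the indexed path reads BASE_VERTEX at byte offset 12 and START_INSTANCE
    * at offset 16, while the non-indexed path reads START_INSTANCE at offset
    * 12 and forces BASE_VERTEX to zero.
    */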

   /* If indirect, emit a bunch of loads from the indirect BO. */
   if (xfb_obj) {
      indirect_flag = GEN7_3DPRIM_INDIRECT_PARAMETER_ENABLE;

      brw_load_register_mem(brw, GEN7_3DPRIM_VERTEX_COUNT,
                            xfb_obj->prim_count_bo,
                            stream * sizeof(uint32_t));
      BEGIN_BATCH(9);
      OUT_BATCH(MI_LOAD_REGISTER_IMM | (9 - 2));
      OUT_BATCH(GEN7_3DPRIM_INSTANCE_COUNT);
      OUT_BATCH(num_instances);
      OUT_BATCH(GEN7_3DPRIM_START_VERTEX);
      OUT_BATCH(0);
      OUT_BATCH(GEN7_3DPRIM_BASE_VERTEX);
      OUT_BATCH(0);
      OUT_BATCH(GEN7_3DPRIM_START_INSTANCE);
      OUT_BATCH(0);
      ADVANCE_BATCH();
   } else if (is_indirect) {
      struct gl_buffer_object *indirect_buffer = brw->ctx.DrawIndirectBuffer;
      struct brw_bo *bo = intel_bufferobj_buffer(brw,
            intel_buffer_object(indirect_buffer),
            indirect_offset, 5 * sizeof(GLuint), false);

      indirect_flag = GEN7_3DPRIM_INDIRECT_PARAMETER_ENABLE;

      brw_load_register_mem(brw, GEN7_3DPRIM_VERTEX_COUNT, bo,
                            indirect_offset + 0);
      brw_load_register_mem(brw, GEN7_3DPRIM_INSTANCE_COUNT, bo,
                            indirect_offset + 4);

      brw_load_register_mem(brw, GEN7_3DPRIM_START_VERTEX, bo,
                            indirect_offset + 8);
      if (is_indexed) {
         brw_load_register_mem(brw, GEN7_3DPRIM_BASE_VERTEX, bo,
                               indirect_offset + 12);
         brw_load_register_mem(brw, GEN7_3DPRIM_START_INSTANCE, bo,
                               indirect_offset + 16);
      } else {
         brw_load_register_mem(brw, GEN7_3DPRIM_START_INSTANCE, bo,
                               indirect_offset + 12);
         brw_load_register_imm32(brw, GEN7_3DPRIM_BASE_VERTEX, 0);
      }
   } else {
      indirect_flag = 0;
   }

   BEGIN_BATCH(devinfo->gen >= 7 ? 7 : 6);

   if (devinfo->gen >= 7) {
      const int predicate_enable =
         (brw->predicate.state == BRW_PREDICATE_STATE_USE_BIT)
         ? GEN7_3DPRIM_PREDICATE_ENABLE : 0;

      OUT_BATCH(CMD_3D_PRIM << 16 | (7 - 2) | indirect_flag | predicate_enable);
      OUT_BATCH(hw_prim | vertex_access_type);
   } else {
      OUT_BATCH(CMD_3D_PRIM << 16 | (6 - 2) |
                hw_prim << GEN4_3DPRIM_TOPOLOGY_TYPE_SHIFT |
                vertex_access_type);
   }
   OUT_BATCH(verts_per_instance);
   OUT_BATCH(start_vertex_location);
   OUT_BATCH(num_instances);
   OUT_BATCH(base_instance);
   OUT_BATCH(base_vertex_location);
   ADVANCE_BATCH();

   if (brw->always_flush_cache)
      brw_emit_mi_flush(brw);
}

static void
brw_clear_buffers(struct brw_context *brw)
{
   for (unsigned i = 0; i < brw->vb.nr_buffers; ++i) {
      brw_bo_unreference(brw->vb.buffers[i].bo);
      brw->vb.buffers[i].bo = NULL;
   }
   brw->vb.nr_buffers = 0;

   for (unsigned i = 0; i < brw->vb.nr_enabled; ++i) {
      brw->vb.enabled[i]->buffer = -1;
   }

   for (unsigned i = 0; i < VERT_ATTRIB_MAX; i++) {
      assert(brw->vb.inputs[i].buffer == -1);
   }
}

static uint8_t get_wa_flags(const struct gl_vertex_format *glformat)
{
   uint8_t wa_flags = 0;

   switch (glformat->Type) {
   case GL_FIXED:
      wa_flags = glformat->Size;
      break;

   case GL_INT_2_10_10_10_REV:
      wa_flags |= BRW_ATTRIB_WA_SIGN;
      /* fallthrough */

   case GL_UNSIGNED_INT_2_10_10_10_REV:
      if (glformat->Format == GL_BGRA)
         wa_flags |= BRW_ATTRIB_WA_BGRA;

      if (glformat->Normalized)
         wa_flags |= BRW_ATTRIB_WA_NORMALIZE;
      else if (!glformat->Integer)
         wa_flags |= BRW_ATTRIB_WA_SCALE;

      break;
   }

   return wa_flags;
}
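
/* For instance, working through the switch above: a three-component GL_FIXED
 * attribute yields wa_flags == 3 (the component count), while a normalized
 * BGRA GL_UNSIGNED_INT_2_10_10_10_REV attribute yields
 * BRW_ATTRIB_WA_BGRA | BRW_ATTRIB_WA_NORMALIZE.
 */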

static void
brw_merge_inputs(struct brw_context *brw)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   const struct gl_context *ctx = &brw->ctx;

   if (devinfo->gen < 8 && !devinfo->is_haswell) {
      /* Prior to Haswell, the hardware can't natively support GL_FIXED or
       * 2_10_10_10_REV vertex formats.  Set appropriate workaround flags.
       */
      const struct gl_vertex_array_object *vao = ctx->Array._DrawVAO;
      const uint64_t vs_inputs = ctx->VertexProgram._Current->info.inputs_read;
      assert((vs_inputs & ~((uint64_t)VERT_BIT_ALL)) == 0);

      unsigned vaomask = vs_inputs & _mesa_draw_array_bits(ctx);
      while (vaomask) {
         const gl_vert_attrib i = u_bit_scan(&vaomask);
         const uint8_t wa_flags =
            get_wa_flags(_mesa_draw_array_format(vao, i));

         if (brw->vb.attrib_wa_flags[i] != wa_flags) {
            brw->vb.attrib_wa_flags[i] = wa_flags;
            brw->ctx.NewDriverState |= BRW_NEW_VS_ATTRIB_WORKAROUNDS;
         }
      }

      unsigned currmask = vs_inputs & _mesa_draw_current_bits(ctx);
      while (currmask) {
         const gl_vert_attrib i = u_bit_scan(&currmask);
         const uint8_t wa_flags =
            get_wa_flags(_mesa_draw_current_format(ctx, i));

         if (brw->vb.attrib_wa_flags[i] != wa_flags) {
            brw->vb.attrib_wa_flags[i] = wa_flags;
            brw->ctx.NewDriverState |= BRW_NEW_VS_ATTRIB_WORKAROUNDS;
         }
      }
   }
}

/* Disable auxiliary buffers if a renderbuffer is also bound as a texture
 * or shader image.  This causes a self-dependency, where both rendering
 * and sampling may concurrently read or write the CCS buffer, causing
 * incorrect pixels.
 */
static bool
intel_disable_rb_aux_buffer(struct brw_context *brw,
                            bool *draw_aux_buffer_disabled,
                            struct intel_mipmap_tree *tex_mt,
                            unsigned min_level, unsigned num_levels,
                            const char *usage)
{
   const struct gl_framebuffer *fb = brw->ctx.DrawBuffer;
   bool found = false;

   /* We only need to worry about color compression and fast clears. */
   if (tex_mt->aux_usage != ISL_AUX_USAGE_CCS_D &&
       tex_mt->aux_usage != ISL_AUX_USAGE_CCS_E)
      return false;

   for (unsigned i = 0; i < fb->_NumColorDrawBuffers; i++) {
      const struct intel_renderbuffer *irb =
         intel_renderbuffer(fb->_ColorDrawBuffers[i]);

      if (irb && irb->mt->bo == tex_mt->bo &&
          irb->mt_level >= min_level &&
          irb->mt_level < min_level + num_levels) {
         found = draw_aux_buffer_disabled[i] = true;
      }
   }

   if (found) {
      perf_debug("Disabling CCS because a renderbuffer is also bound %s.\n",
                 usage);
   }

   return found;
}

/** Implement the ASTC 5x5 sampler workaround
 *
 * Gen9 sampling hardware has a bug where an ASTC 5x5 compressed surface
 * cannot live in the sampler cache at the same time as an aux compressed
 * surface.  In order to work around the bug we have to stall rendering with a
 * CS and pixel scoreboard stall (implicit in the CS stall) and invalidate the
 * texture cache whenever one of ASTC 5x5 or aux compressed may be in the
 * sampler cache and we're about to render with something which samples from
 * the other.
 *
 * In the case of a single shader which textures from both ASTC 5x5 and
 * a texture which is CCS or HiZ compressed, we have to resolve the aux
 * compressed texture prior to rendering.  This second part is handled in
 * brw_predraw_resolve_inputs() below.
 *
 * We have observed this issue to affect CCS and HiZ sampling but whether or
 * not it also affects MCS is unknown.  Because MCS has no concept of a
 * resolve (and doing one would be prohibitively expensive), we choose to
 * simply ignore the possibility and hope for the best.
 */
static void
gen9_apply_astc5x5_wa_flush(struct brw_context *brw,
                            enum gen9_astc5x5_wa_tex_type curr_mask)
{
   assert(brw->screen->devinfo.gen == 9);

   if (((brw->gen9_astc5x5_wa_tex_mask & GEN9_ASTC5X5_WA_TEX_TYPE_ASTC5x5) &&
        (curr_mask & GEN9_ASTC5X5_WA_TEX_TYPE_AUX)) ||
       ((brw->gen9_astc5x5_wa_tex_mask & GEN9_ASTC5X5_WA_TEX_TYPE_AUX) &&
        (curr_mask & GEN9_ASTC5X5_WA_TEX_TYPE_ASTC5x5))) {
      brw_emit_pipe_control_flush(brw, PIPE_CONTROL_CS_STALL);
      brw_emit_pipe_control_flush(brw, PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
   }

   brw->gen9_astc5x5_wa_tex_mask = curr_mask;
}

static enum gen9_astc5x5_wa_tex_type
gen9_astc5x5_wa_bits(mesa_format format, enum isl_aux_usage aux_usage)
{
   if (aux_usage != ISL_AUX_USAGE_NONE &&
       aux_usage != ISL_AUX_USAGE_MCS)
      return GEN9_ASTC5X5_WA_TEX_TYPE_AUX;

   if (format == MESA_FORMAT_RGBA_ASTC_5x5 ||
       format == MESA_FORMAT_SRGB8_ALPHA8_ASTC_5x5)
      return GEN9_ASTC5X5_WA_TEX_TYPE_ASTC5x5;

   return 0;
}
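
/* A note on the classification above: a texture with CCS or HiZ aux data
 * classifies as AUX, an ASTC 5x5 texture classifies as ASTC5x5, and
 * everything else (including MCS, per the comment above) classifies as
 * neither.  The flush in gen9_apply_astc5x5_wa_flush() therefore only fires
 * when consecutive draws switch between the two classes.
 */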

/* Helper for the gen9 ASTC 5x5 workaround.  This version exists for BLORP's
 * use-cases where only a single texture is bound.
 */
void
gen9_apply_single_tex_astc5x5_wa(struct brw_context *brw,
                                 mesa_format format,
                                 enum isl_aux_usage aux_usage)
{
   gen9_apply_astc5x5_wa_flush(brw, gen9_astc5x5_wa_bits(format, aux_usage));
}

static void
mark_textures_used_for_txf(BITSET_WORD *used_for_txf,
                           const struct gl_program *prog)
{
   if (!prog)
      return;

   uint32_t mask = prog->info.textures_used_by_txf;
   while (mask) {
      int s = u_bit_scan(&mask);
      BITSET_SET(used_for_txf, prog->SamplerUnits[s]);
   }
}

/**
 * \brief Resolve buffers before drawing.
 *
 * Resolve the depth buffer's HiZ buffer, resolve the depth buffer of each
 * enabled depth texture, and flush the render cache for any dirty textures.
 */
void
brw_predraw_resolve_inputs(struct brw_context *brw, bool rendering,
                           bool *draw_aux_buffer_disabled)
{
   struct gl_context *ctx = &brw->ctx;
   struct intel_texture_object *tex_obj;

   BITSET_DECLARE(used_for_txf, MAX_COMBINED_TEXTURE_IMAGE_UNITS);
   memset(used_for_txf, 0, sizeof(used_for_txf));
   if (rendering) {
      mark_textures_used_for_txf(used_for_txf, ctx->VertexProgram._Current);
      mark_textures_used_for_txf(used_for_txf, ctx->TessCtrlProgram._Current);
      mark_textures_used_for_txf(used_for_txf, ctx->TessEvalProgram._Current);
      mark_textures_used_for_txf(used_for_txf, ctx->GeometryProgram._Current);
      mark_textures_used_for_txf(used_for_txf, ctx->FragmentProgram._Current);
   } else {
      mark_textures_used_for_txf(used_for_txf, ctx->ComputeProgram._Current);
   }

   int maxEnabledUnit = ctx->Texture._MaxEnabledTexImageUnit;

   enum gen9_astc5x5_wa_tex_type astc5x5_wa_bits = 0;
   if (brw->screen->devinfo.gen == 9) {
      /* In order to properly implement the ASTC 5x5 workaround for an
       * arbitrary draw or dispatch call, we have to walk the entire list of
       * textures looking for ASTC 5x5.  If there is any ASTC 5x5 in this
       * draw call, all aux compressed textures must be resolved and have aux
       * compression disabled while sampling.
       */
      for (int i = 0; i <= maxEnabledUnit; i++) {
         if (!ctx->Texture.Unit[i]._Current)
            continue;

         tex_obj = intel_texture_object(ctx->Texture.Unit[i]._Current);
         if (!tex_obj || !tex_obj->mt)
            continue;

         astc5x5_wa_bits |= gen9_astc5x5_wa_bits(tex_obj->_Format,
                                                 tex_obj->mt->aux_usage);
      }
      gen9_apply_astc5x5_wa_flush(brw, astc5x5_wa_bits);
   }

   /* Resolve depth buffer and render cache of each enabled texture. */
   for (int i = 0; i <= maxEnabledUnit; i++) {
      if (!ctx->Texture.Unit[i]._Current)
         continue;

      tex_obj = intel_texture_object(ctx->Texture.Unit[i]._Current);
      if (!tex_obj || !tex_obj->mt)
         continue;

      struct gl_sampler_object *sampler = _mesa_get_samplerobj(ctx, i);
      enum isl_format view_format =
         translate_tex_format(brw, tex_obj->_Format, sampler->sRGBDecode);

      unsigned min_level, min_layer, num_levels, num_layers;
      if (tex_obj->base.Immutable) {
         min_level  = tex_obj->base.MinLevel;
         num_levels = MIN2(tex_obj->base.NumLevels, tex_obj->_MaxLevel + 1);
         min_layer  = tex_obj->base.MinLayer;
         num_layers = tex_obj->base.Target != GL_TEXTURE_3D ?
                      tex_obj->base.NumLayers : INTEL_REMAINING_LAYERS;
      } else {
         min_level  = tex_obj->base.BaseLevel;
         num_levels = tex_obj->_MaxLevel - tex_obj->base.BaseLevel + 1;
         min_layer  = 0;
         num_layers = INTEL_REMAINING_LAYERS;
      }

      if (rendering) {
         intel_disable_rb_aux_buffer(brw, draw_aux_buffer_disabled,
                                     tex_obj->mt, min_level, num_levels,
                                     "for sampling");
      }

      intel_miptree_prepare_texture(brw, tex_obj->mt, view_format,
                                    min_level, num_levels,
                                    min_layer, num_layers,
                                    astc5x5_wa_bits);

      /* If any programs are using it with texelFetch, we may need to also do
       * a prepare with an sRGB format to ensure texelFetch works "properly".
       */
      if (BITSET_TEST(used_for_txf, i)) {
         enum isl_format txf_format =
            translate_tex_format(brw, tex_obj->_Format, GL_DECODE_EXT);
         if (txf_format != view_format) {
            intel_miptree_prepare_texture(brw, tex_obj->mt, txf_format,
                                          min_level, num_levels,
                                          min_layer, num_layers,
                                          astc5x5_wa_bits);
         }
      }

      brw_cache_flush_for_read(brw, tex_obj->mt->bo);

      if (tex_obj->base.StencilSampling ||
          tex_obj->mt->format == MESA_FORMAT_S_UINT8) {
         intel_update_r8stencil(brw, tex_obj->mt);
      }

      if (intel_miptree_has_etc_shadow(brw, tex_obj->mt) &&
          tex_obj->mt->shadow_needs_update) {
         intel_miptree_update_etc_shadow_levels(brw, tex_obj->mt);
      }
   }

   /* Resolve color for each active shader image. */
   for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
      const struct gl_program *prog = ctx->_Shader->CurrentProgram[i];

      if (unlikely(prog && prog->info.num_images)) {
         for (unsigned j = 0; j < prog->info.num_images; j++) {
            struct gl_image_unit *u =
               &ctx->ImageUnits[prog->sh.ImageUnits[j]];
            tex_obj = intel_texture_object(u->TexObj);

            if (tex_obj && tex_obj->mt) {
               if (rendering) {
                  intel_disable_rb_aux_buffer(brw, draw_aux_buffer_disabled,
                                              tex_obj->mt, 0, ~0,
                                              "as a shader image");
               }

               intel_miptree_prepare_image(brw, tex_obj->mt);

               brw_cache_flush_for_read(brw, tex_obj->mt->bo);
            }
         }
      }
   }
}

static void
brw_predraw_resolve_framebuffer(struct brw_context *brw,
                                bool *draw_aux_buffer_disabled)
{
   struct gl_context *ctx = &brw->ctx;
   struct intel_renderbuffer *depth_irb;

   /* Resolve the depth buffer's HiZ buffer. */
   depth_irb = intel_get_renderbuffer(ctx->DrawBuffer, BUFFER_DEPTH);
   if (depth_irb && depth_irb->mt) {
      intel_miptree_prepare_depth(brw, depth_irb->mt,
                                  depth_irb->mt_level,
                                  depth_irb->mt_layer,
                                  depth_irb->layer_count);
   }

   /* Resolve color buffers for non-coherent framebuffer fetch. */
   if (!ctx->Extensions.EXT_shader_framebuffer_fetch &&
       ctx->FragmentProgram._Current &&
       ctx->FragmentProgram._Current->info.outputs_read) {
      const struct gl_framebuffer *fb = ctx->DrawBuffer;

      /* This is only used for non-coherent framebuffer fetch, so we don't
       * need to worry about CCS_E and can simply pass 'false' below.
       */
      assert(brw->screen->devinfo.gen < 9);

      for (unsigned i = 0; i < fb->_NumColorDrawBuffers; i++) {
         const struct intel_renderbuffer *irb =
            intel_renderbuffer(fb->_ColorDrawBuffers[i]);

         if (irb) {
            intel_miptree_prepare_texture(brw, irb->mt, irb->mt->surf.format,
                                          irb->mt_level, 1,
                                          irb->mt_layer, irb->layer_count,
                                          brw->gen9_astc5x5_wa_tex_mask);
         }
      }
   }

   struct gl_framebuffer *fb = ctx->DrawBuffer;
   for (int i = 0; i < fb->_NumColorDrawBuffers; i++) {
      struct intel_renderbuffer *irb =
         intel_renderbuffer(fb->_ColorDrawBuffers[i]);

      if (irb == NULL || irb->mt == NULL)
         continue;

      mesa_format mesa_format =
         _mesa_get_render_format(ctx, intel_rb_format(irb));
      enum isl_format isl_format = brw_isl_format_for_mesa_format(mesa_format);
      bool blend_enabled = ctx->Color.BlendEnabled & (1 << i);
      enum isl_aux_usage aux_usage =
         intel_miptree_render_aux_usage(brw, irb->mt, isl_format,
                                        blend_enabled,
                                        draw_aux_buffer_disabled[i]);
      if (brw->draw_aux_usage[i] != aux_usage) {
         brw->ctx.NewDriverState |= BRW_NEW_AUX_STATE;
         brw->draw_aux_usage[i] = aux_usage;
      }

      intel_miptree_prepare_render(brw, irb->mt, irb->mt_level,
                                   irb->mt_layer, irb->layer_count,
                                   aux_usage);

      brw_cache_flush_for_render(brw, irb->mt->bo,
                                 isl_format, aux_usage);
   }
}

/**
 * \brief Call this after drawing to mark which buffers need resolving
 *
 * If the depth buffer was written to and if it has an accompanying HiZ
 * buffer, then mark that it needs a depth resolve.
 *
 * If the stencil buffer was written to then mark that it may need to be
 * copied to an R8 texture.
 *
 * If the color buffer is a multisample window system buffer, then
 * mark that it needs a downsample.
 *
 * Also mark any render targets which will be textured as needing a render
 * cache flush.
 */
static void
brw_postdraw_set_buffers_need_resolve(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   struct gl_framebuffer *fb = ctx->DrawBuffer;

   struct intel_renderbuffer *front_irb = NULL;
   struct intel_renderbuffer *back_irb = intel_get_renderbuffer(fb, BUFFER_BACK_LEFT);
   struct intel_renderbuffer *depth_irb = intel_get_renderbuffer(fb, BUFFER_DEPTH);
   struct intel_renderbuffer *stencil_irb = intel_get_renderbuffer(fb, BUFFER_STENCIL);
   struct gl_renderbuffer_attachment *depth_att = &fb->Attachment[BUFFER_DEPTH];

   if (_mesa_is_front_buffer_drawing(fb))
      front_irb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);

   if (front_irb)
      front_irb->need_downsample = true;
   if (back_irb)
      back_irb->need_downsample = true;
   if (depth_irb) {
      bool depth_written = brw_depth_writes_enabled(brw);
      if (depth_att->Layered) {
         intel_miptree_finish_depth(brw, depth_irb->mt,
                                    depth_irb->mt_level,
                                    depth_irb->mt_layer,
                                    depth_irb->layer_count,
                                    depth_written);
      } else {
         intel_miptree_finish_depth(brw, depth_irb->mt,
                                    depth_irb->mt_level,
                                    depth_irb->mt_layer, 1,
                                    depth_written);
      }
      if (depth_written)
         brw_depth_cache_add_bo(brw, depth_irb->mt->bo);
   }

   if (stencil_irb && brw->stencil_write_enabled) {
      struct intel_mipmap_tree *stencil_mt =
         stencil_irb->mt->stencil_mt != NULL ?
         stencil_irb->mt->stencil_mt : stencil_irb->mt;
      brw_depth_cache_add_bo(brw, stencil_mt->bo);
      intel_miptree_finish_write(brw, stencil_mt, stencil_irb->mt_level,
                                 stencil_irb->mt_layer,
                                 stencil_irb->layer_count, ISL_AUX_USAGE_NONE);
   }

   for (unsigned i = 0; i < fb->_NumColorDrawBuffers; i++) {
      struct intel_renderbuffer *irb =
         intel_renderbuffer(fb->_ColorDrawBuffers[i]);

      if (!irb)
         continue;

      mesa_format mesa_format =
         _mesa_get_render_format(ctx, intel_rb_format(irb));
      enum isl_format isl_format = brw_isl_format_for_mesa_format(mesa_format);
      enum isl_aux_usage aux_usage = brw->draw_aux_usage[i];

      brw_render_cache_add_bo(brw, irb->mt->bo, isl_format, aux_usage);

      intel_miptree_finish_render(brw, irb->mt, irb->mt_level,
                                  irb->mt_layer, irb->layer_count,
                                  aux_usage);
   }
}

static void
intel_renderbuffer_move_temp_back(struct brw_context *brw,
                                  struct intel_renderbuffer *irb)
{
   if (irb->align_wa_mt == NULL)
      return;

   brw_cache_flush_for_read(brw, irb->align_wa_mt->bo);

   intel_miptree_copy_slice(brw, irb->align_wa_mt, 0, 0,
                            irb->mt,
                            irb->Base.Base.TexImage->Level, irb->mt_layer);

   intel_miptree_reference(&irb->align_wa_mt, NULL);

   /* Finally restore the x,y to correspond to full miptree. */
   intel_renderbuffer_set_draw_offset(irb);

   /* Make sure render surface state gets re-emitted with updated miptree. */
   brw->NewGLState |= _NEW_BUFFERS;
}

static void
brw_postdraw_reconcile_align_wa_slices(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   struct gl_framebuffer *fb = ctx->DrawBuffer;

   struct intel_renderbuffer *depth_irb =
      intel_get_renderbuffer(fb, BUFFER_DEPTH);
   struct intel_renderbuffer *stencil_irb =
      intel_get_renderbuffer(fb, BUFFER_STENCIL);

   if (depth_irb && depth_irb->align_wa_mt)
      intel_renderbuffer_move_temp_back(brw, depth_irb);

   if (stencil_irb && stencil_irb->align_wa_mt)
      intel_renderbuffer_move_temp_back(brw, stencil_irb);

   for (unsigned i = 0; i < fb->_NumColorDrawBuffers; i++) {
      struct intel_renderbuffer *irb =
         intel_renderbuffer(fb->_ColorDrawBuffers[i]);

      if (!irb || irb->align_wa_mt == NULL)
         continue;

      intel_renderbuffer_move_temp_back(brw, irb);
   }
}

static void
brw_prepare_drawing(struct gl_context *ctx,
                    const struct _mesa_index_buffer *ib,
                    bool index_bounds_valid,
                    GLuint min_index,
                    GLuint max_index)
{
   struct brw_context *brw = brw_context(ctx);

   if (ctx->NewState)
      _mesa_update_state(ctx);

   /* We have to validate the textures *before* checking for fallbacks;
    * otherwise, the software fallback won't be able to rely on the
    * texture state, the firstLevel and lastLevel fields won't be
    * set in the intel texture object (they'll both be 0), and the
    * software fallback will segfault if it attempts to access any
    * texture level other than level 0.
    */
   brw_validate_textures(brw);

   /* Find the highest sampler unit used by each shader program.  A bit-count
    * won't work since ARB programs use the texture unit number as the sampler
    * index.
    */
   brw->wm.base.sampler_count =
      util_last_bit(ctx->FragmentProgram._Current->info.textures_used);
   brw->gs.base.sampler_count = ctx->GeometryProgram._Current ?
      util_last_bit(ctx->GeometryProgram._Current->info.textures_used) : 0;
   brw->tes.base.sampler_count = ctx->TessEvalProgram._Current ?
      util_last_bit(ctx->TessEvalProgram._Current->info.textures_used) : 0;
   brw->tcs.base.sampler_count = ctx->TessCtrlProgram._Current ?
      util_last_bit(ctx->TessCtrlProgram._Current->info.textures_used) : 0;
   brw->vs.base.sampler_count =
      util_last_bit(ctx->VertexProgram._Current->info.textures_used);

   intel_prepare_render(brw);

   /* This workaround has to happen outside of brw_upload_render_state()
    * because it may flush the batchbuffer for a blit, affecting the state
    * flags.
    */
   brw_workaround_depthstencil_alignment(brw, 0);

   /* Resolves must occur after updating renderbuffers, updating context state,
    * and finalizing textures but before setting up any hardware state for
    * this draw call.
    */
   bool draw_aux_buffer_disabled[MAX_DRAW_BUFFERS] = { };
   brw_predraw_resolve_inputs(brw, true, draw_aux_buffer_disabled);
   brw_predraw_resolve_framebuffer(brw, draw_aux_buffer_disabled);

   /* Bind all inputs, derive varying and size information:
    */
   brw_clear_buffers(brw);
   brw_merge_inputs(brw);

   brw->ib.ib = ib;
   brw->ctx.NewDriverState |= BRW_NEW_INDICES;

   brw->vb.index_bounds_valid = index_bounds_valid;
   brw->vb.min_index = min_index;
   brw->vb.max_index = max_index;
   brw->ctx.NewDriverState |= BRW_NEW_VERTICES;
}

static void
brw_finish_drawing(struct gl_context *ctx)
{
   struct brw_context *brw = brw_context(ctx);

   if (brw->always_flush_batch)
      intel_batchbuffer_flush(brw);

   brw_program_cache_check_size(brw);
   brw_postdraw_reconcile_align_wa_slices(brw);
   brw_postdraw_set_buffers_need_resolve(brw);

   if (brw->draw.draw_params_count_bo) {
      brw_bo_unreference(brw->draw.draw_params_count_bo);
      brw->draw.draw_params_count_bo = NULL;
   }

   if (brw->draw.draw_params_bo) {
      brw_bo_unreference(brw->draw.draw_params_bo);
      brw->draw.draw_params_bo = NULL;
   }

   if (brw->draw.derived_draw_params_bo) {
      brw_bo_unreference(brw->draw.derived_draw_params_bo);
      brw->draw.derived_draw_params_bo = NULL;
   }
}

/**
 * Implement workarounds for preemption:
 *    - WaDisableMidObjectPreemptionForGSLineStripAdj
 *    - WaDisableMidObjectPreemptionForTrifanOrPolygon
 *    - WaDisableMidObjectPreemptionForLineLoop
 */
static void
gen9_emit_preempt_wa(struct brw_context *brw,
                     const struct _mesa_prim *prim, GLuint num_instances)
{
   bool object_preemption = true;
   ASSERTED const struct gen_device_info *devinfo = &brw->screen->devinfo;

   /* Only apply these workarounds for gen9 */
   assert(devinfo->gen == 9);

   /* WaDisableMidObjectPreemptionForGSLineStripAdj
    *
    *    WA: Disable mid-draw preemption when draw-call is a linestrip_adj
    *    and GS is enabled.
    */
   if (brw->primitive == _3DPRIM_LINESTRIP_ADJ && brw->gs.enabled)
      object_preemption = false;

   /* WaDisableMidObjectPreemptionForTrifanOrPolygon
    *
    *    TriFan miscompare in Execlist Preemption test.  Cut index that is
    *    on a previous context.  End the previous, then resume another
    *    context with a tri-fan or polygon, and the vertex count is
    *    corrupted.  If we preempt again we will cause corruption.
    *
    *    WA: Disable mid-draw preemption when draw-call has a tri-fan.
    */
   if (brw->primitive == _3DPRIM_TRIFAN)
      object_preemption = false;

   /* WaDisableMidObjectPreemptionForLineLoop
    *
    *    VF Stats Counters Missing a vertex when preemption enabled.
    *
    *    WA: Disable mid-draw preemption when the draw uses a lineloop
    *    topology.
    */
   if (brw->primitive == _3DPRIM_LINELOOP)
      object_preemption = false;

   /* VF is corrupting GAFS data when preempted on an instance boundary and
    * replayed with instancing enabled.
    *
    * WA: Disable preemption when using instancing.
    */
   if (num_instances > 1)
      object_preemption = false;

   brw_enable_obj_preemption(brw, object_preemption);
}

/* May fail if out of video memory for texture or vbo upload, or on
 * fallback conditions.
 */
static void
brw_draw_single_prim(struct gl_context *ctx,
                     const struct _mesa_prim *prim,
                     unsigned prim_id,
                     bool is_indexed,
                     GLuint num_instances, GLuint base_instance,
                     struct brw_transform_feedback_object *xfb_obj,
                     unsigned stream,
                     GLsizeiptr indirect_offset)
{
   struct brw_context *brw = brw_context(ctx);
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   bool fail_next;
   bool is_indirect = brw->draw.draw_indirect_data != NULL;

   /* Flag BRW_NEW_DRAW_CALL on every draw.  This allows us to have
    * atoms that happen on every draw call.
    */
   brw->ctx.NewDriverState |= BRW_NEW_DRAW_CALL;

   /* Flush the batch if the batch/state buffers are nearly full.  We can
    * grow them if needed, but this is not free, so we'd like to avoid it.
    */
   intel_batchbuffer_require_space(brw, 1500);
   brw_require_statebuffer_space(brw, 2400);
   intel_batchbuffer_save_state(brw);
   fail_next = intel_batchbuffer_saved_state_is_empty(brw);

   if (brw->num_instances != num_instances ||
       brw->basevertex != prim->basevertex ||
       brw->baseinstance != base_instance) {
      brw->num_instances = num_instances;
      brw->basevertex = prim->basevertex;
      brw->baseinstance = base_instance;
      if (prim_id > 0) { /* For i == 0 we just did this before the loop */
         brw->ctx.NewDriverState |= BRW_NEW_VERTICES;
         brw_clear_buffers(brw);
      }
   }

   /* Determine if we need to flag BRW_NEW_VERTICES for updating the
    * gl_BaseVertexARB or gl_BaseInstanceARB values.  For indirect draw, we
    * always flag if the shader uses one of the values.  For direct draws,
    * we only flag if the values change.
    */
   const int new_firstvertex =
      is_indexed ? prim->basevertex : prim->start;
   const int new_baseinstance = base_instance;
   const struct brw_vs_prog_data *vs_prog_data =
      brw_vs_prog_data(brw->vs.base.prog_data);
   if (vs_prog_data) {
      const bool uses_draw_parameters =
         vs_prog_data->uses_firstvertex ||
         vs_prog_data->uses_baseinstance;

      if ((uses_draw_parameters && is_indirect) ||
          (vs_prog_data->uses_firstvertex &&
           brw->draw.params.firstvertex != new_firstvertex) ||
          (vs_prog_data->uses_baseinstance &&
           brw->draw.params.gl_baseinstance != new_baseinstance))
         brw->ctx.NewDriverState |= BRW_NEW_VERTICES;
   }

   brw->draw.params.firstvertex = new_firstvertex;
   brw->draw.params.gl_baseinstance = new_baseinstance;
   brw_bo_unreference(brw->draw.draw_params_bo);

   if (is_indirect) {
      /* Point draw_params_bo at the indirect buffer. */
      brw->draw.draw_params_bo =
         intel_buffer_object(ctx->DrawIndirectBuffer)->buffer;
      brw_bo_reference(brw->draw.draw_params_bo);
      brw->draw.draw_params_offset =
         indirect_offset + (is_indexed ? 12 : 8);
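      /* The offsets follow from the ARB_draw_indirect command layouts: in
       * DrawElementsIndirectCommand, baseVertex and baseInstance are the
       * dwords at byte offsets 12 and 16; in DrawArraysIndirectCommand,
       * first and baseInstance are the dwords at offsets 8 and 12.  Either
       * way, draw_params_offset points at a consecutive { firstvertex,
       * baseinstance } pair for the VS to source directly.
       */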
   } else {
      /* Set draw_params_bo to NULL so brw_prepare_vertices knows it
       * has to upload gl_BaseVertex and such if they're needed.
       */
      brw->draw.draw_params_bo = NULL;
      brw->draw.draw_params_offset = 0;
   }

   /* gl_DrawID always needs its own vertex buffer since it's not part of
    * the indirect parameter buffer.  Same for is_indexed_draw, which shares
    * the buffer with gl_DrawID.  If the program uses gl_DrawID, we need to
    * flag BRW_NEW_VERTICES.  For the first iteration, we don't have valid
    * vs_prog_data, but we always flag BRW_NEW_VERTICES before the loop.
    */
   if (prim_id > 0 && vs_prog_data->uses_drawid)
      brw->ctx.NewDriverState |= BRW_NEW_VERTICES;

   brw->draw.derived_params.gl_drawid = prim->draw_id;
   brw->draw.derived_params.is_indexed_draw = is_indexed ? ~0 : 0;

   brw_bo_unreference(brw->draw.derived_draw_params_bo);
   brw->draw.derived_draw_params_bo = NULL;
   brw->draw.derived_draw_params_offset = 0;

   if (devinfo->gen < 6)
      brw_set_prim(brw, prim);
   else
      gen6_set_prim(brw, prim);

retry:

   /* Note that before the loop, brw->ctx.NewDriverState was set to != 0, and
    * that the state updated in the loop outside of this block is that in
    * *_set_prim or intel_batchbuffer_flush(), which only impacts
    * brw->ctx.NewDriverState.
    */
   if (brw->ctx.NewDriverState) {
      brw->batch.no_wrap = true;
      brw_upload_render_state(brw);
   }

   if (devinfo->gen == 9)
      gen9_emit_preempt_wa(brw, prim, num_instances);

   brw_emit_prim(brw, prim, brw->primitive, is_indexed, num_instances,
                 base_instance, xfb_obj, stream, is_indirect,
                 indirect_offset);

   brw->batch.no_wrap = false;

   if (!brw_batch_has_aperture_space(brw, 0)) {
      if (!fail_next) {
         intel_batchbuffer_reset_to_saved(brw);
         intel_batchbuffer_flush(brw);
         fail_next = true;
         goto retry;
      } else {
         int ret = intel_batchbuffer_flush(brw);
         WARN_ONCE(ret == -ENOSPC,
                   "i965: Single primitive emit exceeded "
                   "available aperture space\n");
      }
   }

   /* Now that we know we haven't run out of aperture space, we can safely
    * reset the dirty bits.
    */
   if (brw->ctx.NewDriverState)
      brw_render_state_finished(brw);
}

static void
brw_draw_prims(struct gl_context *ctx,
               const struct _mesa_prim *prims,
               GLuint nr_prims,
               const struct _mesa_index_buffer *ib,
               GLboolean index_bounds_valid,
               GLuint min_index,
               GLuint max_index,
               GLuint num_instances,
               GLuint base_instance,
               struct gl_transform_feedback_object *gl_xfb_obj,
               unsigned stream)
{
   unsigned i;
   struct brw_context *brw = brw_context(ctx);
   int predicate_state = brw->predicate.state;
   struct brw_transform_feedback_object *xfb_obj =
      (struct brw_transform_feedback_object *) gl_xfb_obj;

   if (!brw_check_conditional_render(brw))
      return;

   /* Handle primitive restart if needed */
   if (brw_handle_primitive_restart(ctx, prims, nr_prims, ib, num_instances,
                                    base_instance)) {
      /* The draw was handled, so we can exit now */
      return;
   }

   /* Do GL_SELECT and GL_FEEDBACK rendering using swrast, even though it
    * won't support all the extensions we support.
    */
   if (ctx->RenderMode != GL_RENDER) {
      perf_debug("%s render mode not supported in hardware\n",
                 _mesa_enum_to_string(ctx->RenderMode));
      _swsetup_Wakeup(ctx);
      _tnl_wakeup(ctx);
      _tnl_draw(ctx, prims, nr_prims, ib, index_bounds_valid, min_index,
                max_index, num_instances, base_instance, NULL, 0);
      return;
   }

   /* If we're going to have to upload any of the user's vertex arrays, then
    * get the minimum and maximum of their index buffer so we know what range
    * to upload.
    */
   if (!index_bounds_valid && _mesa_draw_user_array_bits(ctx) != 0) {
      perf_debug("Scanning index buffer to compute index buffer bounds.  "
                 "Use glDrawRangeElements() to avoid this.\n");
      vbo_get_minmax_indices(ctx, prims, ib, &min_index, &max_index, nr_prims);
      index_bounds_valid = true;
   }

   brw_prepare_drawing(ctx, ib, index_bounds_valid, min_index, max_index);
   /* Try drawing with the hardware, but don't do anything else if we can't
    * manage it.  swrast doesn't support our featureset, so we can't fall
    * back to it.
    */
   for (i = 0; i < nr_prims; i++) {
      /* Implementation of ARB_indirect_parameters via predicates */
      if (brw->draw.draw_params_count_bo) {
         brw_emit_pipe_control_flush(brw, PIPE_CONTROL_FLUSH_ENABLE);

         /* Upload the current draw count from the draw parameters buffer
          * to MI_PREDICATE_SRC0.
          */
         brw_load_register_mem(brw, MI_PREDICATE_SRC0,
                               brw->draw.draw_params_count_bo,
                               brw->draw.draw_params_count_offset);
         /* Zero the top 32-bits of MI_PREDICATE_SRC0 */
         brw_load_register_imm32(brw, MI_PREDICATE_SRC0 + 4, 0);
         /* Upload the id of the current primitive to MI_PREDICATE_SRC1. */
         brw_load_register_imm64(brw, MI_PREDICATE_SRC1, prims[i].draw_id);
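
         /* How the predicate math below works out: the first primitive SETs
          * the predicate to !(draw_count == draw_id), so rendering starts
          * enabled whenever at least one draw remains.  Every later
          * primitive XORs in (draw_count == draw_id), which flips the
          * predicate to "skip" exactly when draw_id reaches draw_count and
          * leaves it there for the remaining primitives.
          */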
         BEGIN_BATCH(1);
         if (i == 0 && brw->predicate.state != BRW_PREDICATE_STATE_USE_BIT) {
            OUT_BATCH(GEN7_MI_PREDICATE | MI_PREDICATE_LOADOP_LOADINV |
                      MI_PREDICATE_COMBINEOP_SET |
                      MI_PREDICATE_COMPAREOP_SRCS_EQUAL);
         } else {
            OUT_BATCH(GEN7_MI_PREDICATE |
                      MI_PREDICATE_LOADOP_LOAD | MI_PREDICATE_COMBINEOP_XOR |
                      MI_PREDICATE_COMPAREOP_SRCS_EQUAL);
         }
         ADVANCE_BATCH();

         brw->predicate.state = BRW_PREDICATE_STATE_USE_BIT;
      }

      brw_draw_single_prim(ctx, &prims[i], i, ib != NULL, num_instances,
                           base_instance, xfb_obj, stream,
                           brw->draw.draw_indirect_offset +
                           brw->draw.draw_indirect_stride * i);
   }

   brw_finish_drawing(ctx);
   brw->predicate.state = predicate_state;
}

static void
brw_draw_indirect_prims(struct gl_context *ctx,
                        GLuint mode,
                        struct gl_buffer_object *indirect_data,
                        GLsizeiptr indirect_offset,
                        unsigned draw_count,
                        unsigned stride,
                        struct gl_buffer_object *indirect_params,
                        GLsizeiptr indirect_params_offset,
                        const struct _mesa_index_buffer *ib)
{
   struct brw_context *brw = brw_context(ctx);
   struct _mesa_prim *prim;
   GLsizei i;

   prim = calloc(draw_count, sizeof(*prim));
   if (prim == NULL) {
      _mesa_error(ctx, GL_OUT_OF_MEMORY, "gl%sDraw%sIndirect%s",
                  (draw_count > 1) ? "Multi" : "",
                  ib ? "Elements" : "Arrays",
                  indirect_params ? "CountARB" : "");
      return;
   }

   brw->draw.draw_indirect_stride = stride;
   brw->draw.draw_indirect_offset = indirect_offset;

   prim[draw_count - 1].end = 1;
   for (i = 0; i < draw_count; ++i) {
      prim[i].mode = mode;
      prim[i].draw_id = i;
   }

   if (indirect_params) {
      brw->draw.draw_params_count_bo =
         intel_buffer_object(indirect_params)->buffer;
      brw_bo_reference(brw->draw.draw_params_count_bo);
      brw->draw.draw_params_count_offset = indirect_params_offset;
   }

   brw->draw.draw_indirect_data = indirect_data;

   brw_draw_prims(ctx, prim, draw_count, ib, false, 0, ~0, 0, 0, NULL, 0);

   brw->draw.draw_indirect_data = NULL;

   free(prim);
}

void
brw_init_draw_functions(struct dd_function_table *functions)
{
   /* Register our drawing function:
    */
   functions->Draw = brw_draw_prims;
   functions->DrawIndirect = brw_draw_indirect_prims;
}

void
brw_draw_init(struct brw_context *brw)
{
   for (int i = 0; i < VERT_ATTRIB_MAX; i++)
      brw->vb.inputs[i].buffer = -1;
   brw->vb.nr_buffers = 0;
   brw->vb.nr_enabled = 0;
}

void
brw_draw_destroy(struct brw_context *brw)
{
   unsigned i;

   for (i = 0; i < brw->vb.nr_buffers; i++) {
      brw_bo_unreference(brw->vb.buffers[i].bo);
      brw->vb.buffers[i].bo = NULL;
   }
   brw->vb.nr_buffers = 0;

   for (i = 0; i < brw->vb.nr_enabled; i++) {
      brw->vb.enabled[i]->buffer = -1;
   }
   brw->vb.nr_enabled = 0;

   brw_bo_unreference(brw->ib.bo);
   brw->ib.bo = NULL;
}