/*
 * Copyright © 2017 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/**
 * @file iris_resolve.c
 *
 * This file handles resolve tracking for main and auxiliary surfaces.
 *
 * It also handles our cache tracking. We have sets for the render cache,
 * depth cache, and so on. If a BO is in a cache's set, then it may have
 * data in that cache. The helpers take care of emitting flushes for
 * render-to-texture, format reinterpretation issues, and other situations.
 */

#include "util/hash_table.h"
#include "util/set.h"
#include "iris_context.h"
#include "compiler/nir/nir.h"

/**
 * Disable auxiliary buffers if a renderbuffer is also bound as a texture
 * or shader image. This causes a self-dependency, where both rendering
 * and sampling may concurrently read or write the CCS buffer, causing
 * incorrect pixels.
 */
static bool
disable_rb_aux_buffer(struct iris_context *ice,
                      bool *draw_aux_buffer_disabled,
                      struct iris_resource *tex_res,
                      unsigned min_level, unsigned num_levels,
                      const char *usage)
{
   struct pipe_framebuffer_state *cso_fb = &ice->state.framebuffer;
   bool found = false;

   /* We only need to worry about color compression and fast clears. */
   if (tex_res->aux.usage != ISL_AUX_USAGE_CCS_D &&
       tex_res->aux.usage != ISL_AUX_USAGE_CCS_E)
      return false;

   for (unsigned i = 0; i < cso_fb->nr_cbufs; i++) {
      struct iris_surface *surf = (void *) cso_fb->cbufs[i];
      if (!surf)
         continue;

      struct iris_resource *rb_res = (void *) surf->base.texture;

      if (rb_res->bo == tex_res->bo &&
          surf->base.u.tex.level >= min_level &&
          surf->base.u.tex.level < min_level + num_levels) {
         found = draw_aux_buffer_disabled[i] = true;
      }
   }

   if (found) {
      perf_debug(&ice->dbg,
                 "Disabling CCS because a renderbuffer is also bound %s.\n",
                 usage);
   }

   return found;
}

static void
resolve_sampler_views(struct iris_context *ice,
                      struct iris_batch *batch,
                      struct iris_shader_state *shs,
                      const struct shader_info *info,
                      bool *draw_aux_buffer_disabled,
                      bool consider_framebuffer)
{
   uint32_t views = info ? (shs->bound_sampler_views & info->textures_used) : 0;

   while (views) {
      const int i = u_bit_scan(&views);
      struct iris_sampler_view *isv = shs->textures[i];
      struct iris_resource *res = (void *) isv->base.texture;

      if (res->base.target != PIPE_BUFFER) {
         if (consider_framebuffer) {
            disable_rb_aux_buffer(ice, draw_aux_buffer_disabled,
                                  res, isv->view.base_level, isv->view.levels,
                                  "as a texture");
         }

         iris_resource_prepare_texture(ice, batch, res, isv->view.format,
                                       isv->view.base_level, isv->view.levels,
                                       isv->view.base_array_layer,
                                       isv->view.array_len);
      }

      iris_cache_flush_for_read(batch, res->bo);
   }
}

static void
resolve_image_views(struct iris_context *ice,
                    struct iris_batch *batch,
                    struct iris_shader_state *shs,
                    bool *draw_aux_buffer_disabled,
                    bool consider_framebuffer)
{
   /* TODO: Consider images used by program */
   uint32_t views = shs->bound_image_views;

   while (views) {
      const int i = u_bit_scan(&views);
      struct pipe_image_view *pview = &shs->image[i].base;
      struct iris_resource *res = (void *) pview->resource;

      if (res->base.target != PIPE_BUFFER) {
         if (consider_framebuffer) {
            disable_rb_aux_buffer(ice, draw_aux_buffer_disabled,
                                  res, pview->u.tex.level, 1,
                                  "as a shader image");
         }

         unsigned num_layers =
            pview->u.tex.last_layer - pview->u.tex.first_layer + 1;

         /* The data port doesn't understand any compression */
         iris_resource_prepare_access(ice, batch, res,
                                      pview->u.tex.level, 1,
                                      pview->u.tex.first_layer, num_layers,
                                      ISL_AUX_USAGE_NONE, false);
      }

      iris_cache_flush_for_read(batch, res->bo);
   }
}

/**
 * \brief Resolve buffers before drawing.
 *
 * Resolve the depth buffer's HiZ buffer, resolve the depth buffer of each
 * enabled depth texture, and flush the render cache for any dirty textures.
 */
void
iris_predraw_resolve_inputs(struct iris_context *ice,
                            struct iris_batch *batch,
                            bool *draw_aux_buffer_disabled,
                            gl_shader_stage stage,
                            bool consider_framebuffer)
{
   struct iris_shader_state *shs = &ice->state.shaders[stage];
   const struct shader_info *info = iris_get_shader_info(ice, stage);

   uint64_t dirty = (IRIS_DIRTY_BINDINGS_VS << stage) |
                    (consider_framebuffer ? IRIS_DIRTY_BINDINGS_FS : 0);

   if (ice->state.dirty & dirty) {
      resolve_sampler_views(ice, batch, shs, info, draw_aux_buffer_disabled,
                            consider_framebuffer);
      resolve_image_views(ice, batch, shs, draw_aux_buffer_disabled,
                          consider_framebuffer);
   }
}

void
iris_predraw_resolve_framebuffer(struct iris_context *ice,
                                 struct iris_batch *batch,
                                 bool *draw_aux_buffer_disabled)
{
   struct pipe_framebuffer_state *cso_fb = &ice->state.framebuffer;
   struct iris_screen *screen = (void *) ice->ctx.screen;
   struct gen_device_info *devinfo = &screen->devinfo;
   struct iris_uncompiled_shader *ish =
      ice->shaders.uncompiled[MESA_SHADER_FRAGMENT];
   const nir_shader *nir = ish->nir;

   if (ice->state.dirty & IRIS_DIRTY_DEPTH_BUFFER) {
      struct pipe_surface *zs_surf = cso_fb->zsbuf;

      if (zs_surf) {
         struct iris_resource *z_res, *s_res;
         iris_get_depth_stencil_resources(zs_surf->texture, &z_res, &s_res);
         unsigned num_layers =
            zs_surf->u.tex.last_layer - zs_surf->u.tex.first_layer + 1;

         if (z_res) {
            iris_resource_prepare_depth(ice, batch, z_res,
                                        zs_surf->u.tex.level,
                                        zs_surf->u.tex.first_layer,
                                        num_layers);
            iris_cache_flush_for_depth(batch, z_res->bo);
         }

         if (s_res) {
            iris_cache_flush_for_depth(batch, s_res->bo);
         }
      }
   }

   if (devinfo->gen == 8 && nir->info.outputs_read != 0) {
      for (unsigned i = 0; i < cso_fb->nr_cbufs; i++) {
         if (cso_fb->cbufs[i]) {
            struct iris_surface *surf = (void *) cso_fb->cbufs[i];
            struct iris_resource *res = (void *) cso_fb->cbufs[i]->texture;

            iris_resource_prepare_texture(ice, batch, res, surf->view.format,
                                          surf->view.base_level, 1,
                                          surf->view.base_array_layer,
                                          surf->view.array_len);
         }
      }
   }

   if (ice->state.dirty & (IRIS_DIRTY_BINDINGS_FS | IRIS_DIRTY_BLEND_STATE)) {
      for (unsigned i = 0; i < cso_fb->nr_cbufs; i++) {
         struct iris_surface *surf = (void *) cso_fb->cbufs[i];
         if (!surf)
            continue;

         struct iris_resource *res = (void *) surf->base.texture;

         enum isl_aux_usage aux_usage =
            iris_resource_render_aux_usage(ice, res, surf->view.format,
                                           ice->state.blend_enables & (1u << i),
                                           draw_aux_buffer_disabled[i]);

         if (ice->state.draw_aux_usage[i] != aux_usage) {
            ice->state.draw_aux_usage[i] = aux_usage;
            /* XXX: Need to track which bindings to make dirty */
            ice->state.dirty |= IRIS_ALL_DIRTY_BINDINGS;
         }

         iris_resource_prepare_render(ice, batch, res, surf->view.base_level,
                                      surf->view.base_array_layer,
                                      surf->view.array_len,
                                      aux_usage);

         iris_cache_flush_for_render(batch, res->bo, surf->view.format,
                                     aux_usage);
      }
   }
}

/**
 * \brief Call this after drawing to mark which buffers need resolving
 *
 * If the depth buffer was written to and if it has an accompanying HiZ
 * buffer, then mark that it needs a depth resolve.
 *
 * If the color buffer is a multisample window system buffer, then
 * mark that it needs a downsample.
 *
 * Also mark any render targets which will be textured as needing a render
 * cache flush.
 */
void
iris_postdraw_update_resolve_tracking(struct iris_context *ice,
                                      struct iris_batch *batch)
{
   struct pipe_framebuffer_state *cso_fb = &ice->state.framebuffer;

   // XXX: front buffer drawing?

   bool may_have_resolved_depth =
      ice->state.dirty & (IRIS_DIRTY_DEPTH_BUFFER |
                          IRIS_DIRTY_WM_DEPTH_STENCIL);

   struct pipe_surface *zs_surf = cso_fb->zsbuf;
   if (zs_surf) {
      struct iris_resource *z_res, *s_res;
      iris_get_depth_stencil_resources(zs_surf->texture, &z_res, &s_res);
      unsigned num_layers =
         zs_surf->u.tex.last_layer - zs_surf->u.tex.first_layer + 1;

      if (z_res) {
         if (may_have_resolved_depth) {
            iris_resource_finish_depth(ice, z_res, zs_surf->u.tex.level,
                                       zs_surf->u.tex.first_layer, num_layers,
                                       ice->state.depth_writes_enabled);
         }

         if (ice->state.depth_writes_enabled)
            iris_depth_cache_add_bo(batch, z_res->bo);
      }

      if (s_res) {
         if (may_have_resolved_depth && ice->state.stencil_writes_enabled) {
            iris_resource_finish_write(ice, s_res, zs_surf->u.tex.level,
                                       zs_surf->u.tex.first_layer, num_layers,
                                       s_res->aux.usage);
         }

         if (ice->state.stencil_writes_enabled)
            iris_depth_cache_add_bo(batch, s_res->bo);
      }
   }

   bool may_have_resolved_color =
      ice->state.dirty & (IRIS_DIRTY_BINDINGS_FS | IRIS_DIRTY_BLEND_STATE);

   for (unsigned i = 0; i < cso_fb->nr_cbufs; i++) {
      struct iris_surface *surf = (void *) cso_fb->cbufs[i];
      if (!surf)
         continue;

      struct iris_resource *res = (void *) surf->base.texture;
      enum isl_aux_usage aux_usage = ice->state.draw_aux_usage[i];

      iris_render_cache_add_bo(batch, res->bo, surf->view.format,
                               aux_usage);

      if (may_have_resolved_color) {
         union pipe_surface_desc *desc = &surf->base.u;
         unsigned num_layers =
            desc->tex.last_layer - desc->tex.first_layer + 1;
         iris_resource_finish_render(ice, res, desc->tex.level,
                                     desc->tex.first_layer, num_layers,
                                     aux_usage);
      }
   }
}

/**
 * Clear the cache-tracking sets.
 */
void
iris_cache_sets_clear(struct iris_batch *batch)
{
   hash_table_foreach(batch->cache.render, render_entry)
      _mesa_hash_table_remove(batch->cache.render, render_entry);

   set_foreach(batch->cache.depth, depth_entry)
      _mesa_set_remove(batch->cache.depth, depth_entry);
}

/**
 * Emits an appropriate flush for a BO if it has been rendered to within the
 * same batchbuffer as a read that's about to be emitted.
 *
 * The GPU has separate, incoherent caches for the render cache and the
 * sampler cache, along with other caches. Usually data in the different
 * caches don't interact (e.g. we don't render to our driver-generated
 * immediate constant data), but for render-to-texture in FBOs we definitely
 * do. When a batchbuffer is flushed, the kernel will ensure that everything
 * necessary is flushed before another use of that BO, but for reuse from
 * different caches within a batchbuffer, it's all our responsibility.
 */
static void
iris_flush_depth_and_render_caches(struct iris_batch *batch)
{
   iris_emit_pipe_control_flush(batch,
                                "cache tracker: render-to-texture",
                                PIPE_CONTROL_DEPTH_CACHE_FLUSH |
                                PIPE_CONTROL_RENDER_TARGET_FLUSH |
                                PIPE_CONTROL_CS_STALL);

   iris_emit_pipe_control_flush(batch,
                                "cache tracker: render-to-texture",
                                PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
                                PIPE_CONTROL_CONST_CACHE_INVALIDATE);

   iris_cache_sets_clear(batch);
}

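/* Flush before a read if the BO may still have data sitting in the render
 * or depth cache from earlier work in this same batch.
 */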
void
iris_cache_flush_for_read(struct iris_batch *batch,
                          struct iris_bo *bo)
{
   if (_mesa_hash_table_search_pre_hashed(batch->cache.render, bo->hash, bo) ||
       _mesa_set_search_pre_hashed(batch->cache.depth, bo->hash, bo))
      iris_flush_depth_and_render_caches(batch);
}

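/* The render cache set maps each BO to the (format, aux usage) pair it was
 * last rendered with, packed into the hash table's data pointer by
 * format_aux_tuple() below, so that a format or aux-usage reinterpretation
 * can be detected cheaply in iris_cache_flush_for_render().
 */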
static void *
format_aux_tuple(enum isl_format format, enum isl_aux_usage aux_usage)
{
   return (void *)(uintptr_t)((uint32_t)format << 8 | aux_usage);
}

void
iris_cache_flush_for_render(struct iris_batch *batch,
                            struct iris_bo *bo,
                            enum isl_format format,
                            enum isl_aux_usage aux_usage)
{
   if (_mesa_set_search_pre_hashed(batch->cache.depth, bo->hash, bo))
      iris_flush_depth_and_render_caches(batch);

   /* Check to see if this bo has been used by a previous rendering operation
    * but with a different format or aux usage. If it has, flush the render
    * cache so we ensure that it's only in there with one format or aux usage
    * at a time.
    *
    * Even though it's not obvious, this can easily happen in practice.
    * Suppose a client is blending on a surface with sRGB encode enabled on
    * gen9. This implies that you get AUX_USAGE_CCS_D at best. If the client
    * then disables sRGB decode and continues blending we will flip on
    * AUX_USAGE_CCS_E without doing any sort of resolve in-between (this is
    * perfectly valid since CCS_E is a subset of CCS_D). However, this means
    * that we have fragments in-flight which are rendering with UNORM+CCS_E
    * and other fragments in-flight with SRGB+CCS_D on the same surface at the
    * same time and the pixel scoreboard and color blender are trying to sort
    * it all out. This ends badly (i.e. GPU hangs).
    *
    * To date, we have never observed GPU hangs or even corruption to be
    * associated with switching the format, only the aux usage. However,
    * there are comments in various docs which indicate that the render cache
    * isn't 100% resilient to format changes. We may as well be conservative
    * and flush on format changes too. We can always relax this later if we
    * find it to be a performance problem.
    */
   struct hash_entry *entry =
      _mesa_hash_table_search_pre_hashed(batch->cache.render, bo->hash, bo);
   if (entry && entry->data != format_aux_tuple(format, aux_usage))
      iris_flush_depth_and_render_caches(batch);
}

void
iris_render_cache_add_bo(struct iris_batch *batch,
                         struct iris_bo *bo,
                         enum isl_format format,
                         enum isl_aux_usage aux_usage)
{
   struct hash_entry *entry =
      _mesa_hash_table_search_pre_hashed(batch->cache.render, bo->hash, bo);
   if (entry) {
      /* Otherwise, someone didn't do a flush_for_render and that would be
       * a bug.
       */
      assert(entry->data == format_aux_tuple(format, aux_usage));
      return;
   }

   _mesa_hash_table_insert_pre_hashed(batch->cache.render, bo->hash, bo,
                                      format_aux_tuple(format, aux_usage));
}

void
iris_cache_flush_for_depth(struct iris_batch *batch,
                           struct iris_bo *bo)
{
   if (_mesa_hash_table_search_pre_hashed(batch->cache.render, bo->hash, bo))
      iris_flush_depth_and_render_caches(batch);
}

void
iris_depth_cache_add_bo(struct iris_batch *batch, struct iris_bo *bo)
{
   _mesa_set_add_pre_hashed(batch->cache.depth, bo->hash, bo);
}

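/* Run a CCS resolve operation (full or partial) on a single level/layer of a
 * color surface using BLORP; on Gen12+, compressed stencil instead goes
 * through the HiZ/stencil op path, as noted below.
 */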
static void
iris_resolve_color(struct iris_context *ice,
                   struct iris_batch *batch,
                   struct iris_resource *res,
                   unsigned level, unsigned layer,
                   enum isl_aux_op resolve_op)
{
   //DBG("%s to mt %p level %u layer %u\n", __FUNCTION__, mt, level, layer);

   struct blorp_surf surf;
   iris_blorp_surf_for_resource(&ice->vtbl, &batch->screen->isl_dev, &surf,
                                &res->base, res->aux.usage, level, true);

   iris_batch_maybe_flush(batch, 1500);

   /* Ivybridge PRM Vol 2, Part 1, "11.7 MCS Buffer for Render Target(s)":
    *
    *    "Any transition from any value in {Clear, Render, Resolve} to a
    *     different value in {Clear, Render, Resolve} requires end of pipe
    *     synchronization."
    *
    * In other words, fast clear ops are not properly synchronized with
    * other drawing. We need to use a PIPE_CONTROL to ensure that the
    * contents of the previous draw hit the render target before we resolve
    * and again afterwards to ensure that the resolve is complete before we
    * do any more regular drawing.
    */
   iris_emit_end_of_pipe_sync(batch, "color resolve: pre-flush",
                              PIPE_CONTROL_RENDER_TARGET_FLUSH);

   struct blorp_batch blorp_batch;
   blorp_batch_init(&ice->blorp, &blorp_batch, batch, 0);
   /* On Gen >= 12, stencil buffers with lossless compression need to be
    * resolved with the WM_HZ_OP packet.
    */
   if (isl_surf_usage_is_stencil(res->surf.usage)) {
      blorp_hiz_stencil_op(&blorp_batch, &surf, level, layer,
                           1, resolve_op);
   } else {
      blorp_ccs_resolve(&blorp_batch, &surf, level, layer, 1,
                        isl_format_srgb_to_linear(res->surf.format),
                        resolve_op);
   }
   blorp_batch_finish(&blorp_batch);

   /* See comment above */
   iris_emit_end_of_pipe_sync(batch, "color resolve: post-flush",
                              PIPE_CONTROL_RENDER_TARGET_FLUSH);
}

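/* BLORP-based MCS partial resolve for multisampled surfaces; used by
 * iris_resource_prepare_access() when isl requests ISL_AUX_OP_PARTIAL_RESOLVE
 * for an MCS-compressed resource.
 */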
void
iris_mcs_partial_resolve(struct iris_context *ice,
                         struct iris_batch *batch,
                         struct iris_resource *res,
                         uint32_t start_layer,
                         uint32_t num_layers)
{
   //DBG("%s to mt %p layers %u-%u\n", __FUNCTION__, mt,
   //    start_layer, start_layer + num_layers - 1);

   assert(isl_aux_usage_has_mcs(res->aux.usage));

   struct blorp_surf surf;
   iris_blorp_surf_for_resource(&ice->vtbl, &batch->screen->isl_dev, &surf,
                                &res->base, res->aux.usage, 0, true);

   struct blorp_batch blorp_batch;
   blorp_batch_init(&ice->blorp, &blorp_batch, batch, 0);
   blorp_mcs_partial_resolve(&blorp_batch, &surf,
                             isl_format_srgb_to_linear(res->surf.format),
                             start_layer, num_layers);
   blorp_batch_finish(&blorp_batch);
}

/**
 * Return true if the format that will be used to access the resource is
 * CCS_E-compatible with the resource's linear/non-sRGB format.
 *
 * Why use the linear format? Well, although the resource may be specified
 * with an sRGB format, the usage of that color space/format can be toggled.
 * Since our HW tends to support more linear formats than sRGB ones, we use
 * this format variant to check for CCS_E compatibility.
 */
static bool
format_ccs_e_compat_with_resource(const struct gen_device_info *devinfo,
                                  const struct iris_resource *res,
                                  enum isl_format access_format)
{
   assert(res->aux.usage == ISL_AUX_USAGE_CCS_E);

   enum isl_format isl_format = isl_format_srgb_to_linear(res->surf.format);
   return isl_formats_are_ccs_e_compatible(devinfo, isl_format, access_format);
}

static bool
iris_sample_with_depth_aux(const struct gen_device_info *devinfo,
                           const struct iris_resource *res)
{
   switch (res->aux.usage) {
   case ISL_AUX_USAGE_HIZ:
      if (devinfo->has_sample_with_hiz)
         break;
      return false;
   case ISL_AUX_USAGE_HIZ_CCS:
      return false;
   case ISL_AUX_USAGE_HIZ_CCS_WT:
      break;
   default:
      return false;
   }

   /* It seems the hardware won't fall back to the depth buffer if some of the
    * mipmap levels aren't available in the HiZ buffer. So we need all levels
    * of the texture to be HiZ enabled.
    */
   for (unsigned level = 0; level < res->surf.levels; ++level) {
      if (!iris_resource_level_has_hiz(res, level))
         return false;
   }

   /* If compressed multisampling is enabled, then we use it for the auxiliary
    * buffer instead.
    *
    * From the BDW PRM (Volume 2d: Command Reference: Structures
    *                   RENDER_SURFACE_STATE.AuxiliarySurfaceMode):
    *
    *  "If this field is set to AUX_HIZ, Number of Multisamples must be
    *   MULTISAMPLECOUNT_1, and Surface Type cannot be SURFTYPE_3D."
    *
    * There is no such blurb for 1D textures, but there is sufficient evidence
    * that this is broken on SKL+.
    */
   // XXX: i965 disables this for arrays too, is that reasonable?
   return res->surf.samples == 1 && res->surf.dim == ISL_SURF_DIM_2D;
}

/**
 * Perform a HiZ or depth resolve operation.
 *
 * For an overview of HiZ ops, see the following sections of the Sandy Bridge
 * PRM, Volume 1, Part 2:
 *   - 7.5.3.1 Depth Buffer Clear
 *   - 7.5.3.2 Depth Buffer Resolve
 *   - 7.5.3.3 Hierarchical Depth Buffer Resolve
 */
void
iris_hiz_exec(struct iris_context *ice,
              struct iris_batch *batch,
              struct iris_resource *res,
              unsigned int level, unsigned int start_layer,
              unsigned int num_layers, enum isl_aux_op op,
              bool update_clear_depth)
{
   assert(iris_resource_level_has_hiz(res, level));
   assert(op != ISL_AUX_OP_NONE);
   UNUSED const char *name = NULL;

   switch (op) {
   case ISL_AUX_OP_FULL_RESOLVE:
      name = "depth resolve";
      break;
   case ISL_AUX_OP_AMBIGUATE:
      name = "hiz ambiguate";
      break;
   case ISL_AUX_OP_FAST_CLEAR:
      name = "depth clear";
      break;
   case ISL_AUX_OP_PARTIAL_RESOLVE:
   case ISL_AUX_OP_NONE:
      unreachable("Invalid HiZ op");
   }

   //DBG("%s %s to mt %p level %d layers %d-%d\n",
   //    __func__, name, mt, level, start_layer, start_layer + num_layers - 1);

   /* The following stalls and flushes are only documented to be required
    * for HiZ clear operations. However, they also seem to be required for
    * resolve operations.
    *
    * From the Ivybridge PRM, volume 2, "Depth Buffer Clear":
    *
    *   "If other rendering operations have preceded this clear, a
    *    PIPE_CONTROL with depth cache flush enabled, Depth Stall bit
    *    enabled must be issued before the rectangle primitive used for
    *    the depth buffer clear operation."
    *
    * Same applies for Gen8 and Gen9.
    *
    * In addition, from the Ivybridge PRM, volume 2, 1.10.4.1
    * PIPE_CONTROL, Depth Cache Flush Enable:
    *
    *   "This bit must not be set when Depth Stall Enable bit is set in
    *    this packet."
    *
    * This is confirmed to hold for real; Haswell gets immediate GPU hangs.
    *
    * Therefore issue two pipe control flushes, one for cache flush and
    * another for depth stall.
    */
   iris_emit_pipe_control_flush(batch,
                                "hiz op: pre-flushes (1/2)",
                                PIPE_CONTROL_DEPTH_CACHE_FLUSH |
                                PIPE_CONTROL_CS_STALL);

   iris_emit_pipe_control_flush(batch, "hiz op: pre-flushes (2/2)",
                                PIPE_CONTROL_DEPTH_STALL);

   assert(isl_aux_usage_has_hiz(res->aux.usage) && res->aux.bo);

   iris_batch_maybe_flush(batch, 1500);

   struct blorp_surf surf;
   iris_blorp_surf_for_resource(&ice->vtbl, &batch->screen->isl_dev, &surf,
                                &res->base, res->aux.usage, level, true);

   struct blorp_batch blorp_batch;
   enum blorp_batch_flags flags = 0;
   flags |= update_clear_depth ? 0 : BLORP_BATCH_NO_UPDATE_CLEAR_COLOR;
   blorp_batch_init(&ice->blorp, &blorp_batch, batch, flags);
   blorp_hiz_op(&blorp_batch, &surf, level, start_layer, num_layers, op);
   blorp_batch_finish(&blorp_batch);

   /* The following stalls and flushes are only documented to be required
    * for HiZ clear operations. However, they also seem to be required for
    * resolve operations.
    *
    * From the Broadwell PRM, volume 7, "Depth Buffer Clear":
    *
    *    "Depth buffer clear pass using any of the methods (WM_STATE,
    *     3DSTATE_WM or 3DSTATE_WM_HZ_OP) must be followed by a
    *     PIPE_CONTROL command with DEPTH_STALL bit and Depth FLUSH bits
    *     "set" before starting to render. DepthStall and DepthFlush are
    *     not needed between consecutive depth clear passes nor is it
    *     required if the depth clear pass was done with
    *     'full_surf_clear' bit set in the 3DSTATE_WM_HZ_OP."
    *
    * TODO: As the spec says, this could be conditional.
    */
   iris_emit_pipe_control_flush(batch,
                                "hiz op: post flush",
                                PIPE_CONTROL_DEPTH_CACHE_FLUSH |
                                PIPE_CONTROL_DEPTH_STALL);
}

static bool
level_has_aux(const struct iris_resource *res, uint32_t level)
{
   return isl_aux_usage_has_hiz(res->aux.usage) ?
          iris_resource_level_has_hiz(res, level) :
          res->aux.usage != ISL_AUX_USAGE_NONE;
}

/**
 * Does the resource's slice have hiz enabled?
 */
bool
iris_resource_level_has_hiz(const struct iris_resource *res, uint32_t level)
{
   iris_resource_check_level_layer(res, level, 0);
   return res->aux.has_hiz & 1 << level;
}

/** \brief Assert that the level and layer are valid for the resource. */
void
iris_resource_check_level_layer(UNUSED const struct iris_resource *res,
                                UNUSED uint32_t level, UNUSED uint32_t layer)
{
   assert(level < res->surf.levels);
   assert(layer < util_num_layers(&res->base, level));
}

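/* Range-clamping helpers. Callers may pass INTEL_REMAINING_LEVELS or
 * INTEL_REMAINING_LAYERS to mean "through the last level/layer"; the helpers
 * return the actual count, asserting that the range fits the resource.
 */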
static inline uint32_t
miptree_level_range_length(const struct iris_resource *res,
                           uint32_t start_level, uint32_t num_levels)
{
   assert(start_level < res->surf.levels);

   if (num_levels == INTEL_REMAINING_LEVELS)
      num_levels = res->surf.levels;

   /* Check for overflow */
   assert(start_level + num_levels >= start_level);
   assert(start_level + num_levels <= res->surf.levels);

   return num_levels;
}

static inline uint32_t
miptree_layer_range_length(const struct iris_resource *res, uint32_t level,
                           uint32_t start_layer, uint32_t num_layers)
{
   assert(level <= res->base.last_level);

   const uint32_t total_num_layers = iris_get_num_logical_layers(res, level);
   assert(start_layer < total_num_layers);
   if (num_layers == INTEL_REMAINING_LAYERS)
      num_layers = total_num_layers - start_layer;
   /* Check for overflow */
   assert(start_layer + num_layers >= start_layer);
   assert(start_layer + num_layers <= total_num_layers);

   return num_layers;
}

bool
iris_has_color_unresolved(const struct iris_resource *res,
                          unsigned start_level, unsigned num_levels,
                          unsigned start_layer, unsigned num_layers)
{
   if (!res->aux.bo)
      return false;

   /* Clamp the level range to fit the resource */
   num_levels = miptree_level_range_length(res, start_level, num_levels);

   for (uint32_t l = 0; l < num_levels; l++) {
      const uint32_t level = start_level + l;
      const uint32_t level_layers =
         miptree_layer_range_length(res, level, start_layer, num_layers);
      for (unsigned a = 0; a < level_layers; a++) {
         enum isl_aux_state aux_state =
            iris_resource_get_aux_state(res, level, start_layer + a);
         assert(aux_state != ISL_AUX_STATE_AUX_INVALID);
         if (aux_state != ISL_AUX_STATE_PASS_THROUGH)
            return true;
      }
   }

   return false;
}

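/* Prepare a range of a resource for an access with the given aux usage:
 * for each (level, layer) that has auxiliary data, ask isl which resolve
 * operation (if any) is required, perform it via the MCS/HiZ/CCS path, and
 * record the resulting aux state.
 */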
void
iris_resource_prepare_access(struct iris_context *ice,
                             struct iris_batch *batch,
                             struct iris_resource *res,
                             uint32_t start_level, uint32_t num_levels,
                             uint32_t start_layer, uint32_t num_layers,
                             enum isl_aux_usage aux_usage,
                             bool fast_clear_supported)
{
   const uint32_t clamped_levels =
      miptree_level_range_length(res, start_level, num_levels);
   for (uint32_t l = 0; l < clamped_levels; l++) {
      const uint32_t level = start_level + l;
      if (!level_has_aux(res, level))
         continue;

      const uint32_t level_layers =
         miptree_layer_range_length(res, level, start_layer, num_layers);
      for (uint32_t a = 0; a < level_layers; a++) {
         const uint32_t layer = start_layer + a;
         const enum isl_aux_state aux_state =
            iris_resource_get_aux_state(res, level, layer);
         const enum isl_aux_op aux_op =
            isl_aux_prepare_access(aux_state, aux_usage, fast_clear_supported);

         if (aux_op == ISL_AUX_OP_NONE) {
            /* Nothing to do here. */
         } else if (isl_aux_usage_has_mcs(res->aux.usage)) {
            assert(aux_op == ISL_AUX_OP_PARTIAL_RESOLVE);
            iris_mcs_partial_resolve(ice, batch, res, layer, 1);
         } else if (isl_aux_usage_has_hiz(res->aux.usage)) {
            iris_hiz_exec(ice, batch, res, level, layer, 1, aux_op, false);
         } else {
            assert(isl_aux_usage_has_ccs(res->aux.usage));
            iris_resolve_color(ice, batch, res, level, layer, aux_op);
         }

         const enum isl_aux_state new_state =
            isl_aux_state_transition_aux_op(aux_state, res->aux.usage, aux_op);
         iris_resource_set_aux_state(ice, res, level, layer, 1, new_state);
      }
   }
}

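/* Note that a range of a resource was written with the given aux usage,
 * updating the per-(level, layer) aux state via isl's write transition.
 */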
void
iris_resource_finish_write(struct iris_context *ice,
                           struct iris_resource *res, uint32_t level,
                           uint32_t start_layer, uint32_t num_layers,
                           enum isl_aux_usage aux_usage)
{
   if (!level_has_aux(res, level))
      return;

   const uint32_t level_layers =
      miptree_layer_range_length(res, level, start_layer, num_layers);

   for (uint32_t a = 0; a < level_layers; a++) {
      const uint32_t layer = start_layer + a;
      const enum isl_aux_state aux_state =
         iris_resource_get_aux_state(res, level, layer);
      const enum isl_aux_state new_aux_state =
         isl_aux_state_transition_write(aux_state, aux_usage, false);
      iris_resource_set_aux_state(ice, res, level, layer, 1, new_aux_state);
   }
}

enum isl_aux_state
iris_resource_get_aux_state(const struct iris_resource *res,
                            uint32_t level, uint32_t layer)
{
   iris_resource_check_level_layer(res, level, layer);

   if (res->surf.usage & ISL_SURF_USAGE_DEPTH_BIT) {
      assert(iris_resource_level_has_hiz(res, level));
   } else {
      assert(res->surf.samples == 1 ||
             res->surf.msaa_layout == ISL_MSAA_LAYOUT_ARRAY);
   }

   return res->aux.state[level][layer];
}

void
iris_resource_set_aux_state(struct iris_context *ice,
                            struct iris_resource *res, uint32_t level,
                            uint32_t start_layer, uint32_t num_layers,
                            enum isl_aux_state aux_state)
{
   num_layers = miptree_layer_range_length(res, level, start_layer, num_layers);

   if (res->surf.usage & ISL_SURF_USAGE_DEPTH_BIT) {
      assert(iris_resource_level_has_hiz(res, level));
   } else {
      assert(res->surf.samples == 1 ||
             res->surf.msaa_layout == ISL_MSAA_LAYOUT_ARRAY);
   }

   for (unsigned a = 0; a < num_layers; a++) {
      if (res->aux.state[level][start_layer + a] != aux_state) {
         res->aux.state[level][start_layer + a] = aux_state;
         /* XXX: Need to track which bindings to make dirty */
         ice->state.dirty |= IRIS_ALL_DIRTY_BINDINGS;
      }
   }
}

/* On Gen9 color buffers may be compressed by the hardware (lossless
 * compression). There are, however, format restrictions and care needs to be
 * taken that the sampler engine is capable of re-interpreting a buffer with
 * a format different from the one it was originally written with.
 *
 * For example, SRGB formats are not compressible and the sampler engine isn't
 * capable of treating RGBA_UNORM as SRGB_ALPHA. In such a case the underlying
 * color buffer needs to be resolved so that the sampling surface can be
 * sampled as non-compressed (i.e., without the auxiliary MCS buffer being
 * set).
 */
static bool
can_texture_with_ccs(const struct gen_device_info *devinfo,
                     struct pipe_debug_callback *dbg,
                     const struct iris_resource *res,
                     enum isl_format view_format)
{
   if (res->aux.usage != ISL_AUX_USAGE_CCS_E)
      return false;

   if (!format_ccs_e_compat_with_resource(devinfo, res, view_format)) {
      const struct isl_format_layout *res_fmtl =
         isl_format_get_layout(res->surf.format);
      const struct isl_format_layout *view_fmtl =
         isl_format_get_layout(view_format);

      perf_debug(dbg, "Incompatible sampling format (%s) for CCS (%s)\n",
                 view_fmtl->name, res_fmtl->name);
      return false;
   }

   return true;
}

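/* Pick the aux usage to sample a resource with, given the view format:
 * HiZ variants only when the hardware can sample with HiZ, MCS as-is, and
 * CCS_E only when unresolved data exists and the view format is compatible.
 */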
enum isl_aux_usage
iris_resource_texture_aux_usage(struct iris_context *ice,
                                const struct iris_resource *res,
                                enum isl_format view_format)
{
   struct iris_screen *screen = (void *) ice->ctx.screen;
   struct gen_device_info *devinfo = &screen->devinfo;

   switch (res->aux.usage) {
   case ISL_AUX_USAGE_HIZ:
      if (iris_sample_with_depth_aux(devinfo, res))
         return ISL_AUX_USAGE_HIZ;
      break;

   case ISL_AUX_USAGE_HIZ_CCS:
      assert(!iris_sample_with_depth_aux(devinfo, res));
      return ISL_AUX_USAGE_NONE;

   case ISL_AUX_USAGE_HIZ_CCS_WT:
      if (iris_sample_with_depth_aux(devinfo, res))
         return ISL_AUX_USAGE_HIZ_CCS_WT;
      break;

   case ISL_AUX_USAGE_MCS:
   case ISL_AUX_USAGE_MCS_CCS:
      return res->aux.usage;

   case ISL_AUX_USAGE_CCS_D:
   case ISL_AUX_USAGE_CCS_E:
      /* If we don't have any unresolved color, report an aux usage of
       * ISL_AUX_USAGE_NONE. This way, texturing won't even look at the
       * aux surface and we can save some bandwidth.
       */
      if (!iris_has_color_unresolved(res, 0, INTEL_REMAINING_LEVELS,
                                     0, INTEL_REMAINING_LAYERS))
         return ISL_AUX_USAGE_NONE;

      if (can_texture_with_ccs(devinfo, &ice->dbg, res, view_format))
         return ISL_AUX_USAGE_CCS_E;
      break;

   default:
      break;
   }

   return ISL_AUX_USAGE_NONE;
}

static bool
isl_formats_are_fast_clear_compatible(enum isl_format a, enum isl_format b)
{
   /* On gen8 and earlier, the hardware was only capable of handling 0/1 clear
    * values so sRGB curve application was a no-op for all fast-clearable
    * formats.
    *
    * On gen9+, the hardware supports arbitrary clear values. For sRGB clear
    * values, the hardware interprets the floats, not as what would be
    * returned from the sampler (or written by the shader), but as being
    * between format conversion and sRGB curve application. This means that
    * we can switch between sRGB and UNORM without having to whack the clear
    * color.
    */
   return isl_format_srgb_to_linear(a) == isl_format_srgb_to_linear(b);
}

void
iris_resource_prepare_texture(struct iris_context *ice,
                              struct iris_batch *batch,
                              struct iris_resource *res,
                              enum isl_format view_format,
                              uint32_t start_level, uint32_t num_levels,
                              uint32_t start_layer, uint32_t num_layers)
{
   enum isl_aux_usage aux_usage =
      iris_resource_texture_aux_usage(ice, res, view_format);

   bool clear_supported = isl_aux_usage_has_fast_clears(aux_usage);

   /* Clear color is specified as ints or floats and the conversion is done by
    * the sampler. If we have a texture view, we would have to perform the
    * clear color conversion manually. Just disable clear color.
    */
   if (!isl_formats_are_fast_clear_compatible(res->surf.format, view_format))
      clear_supported = false;

   iris_resource_prepare_access(ice, batch, res, start_level, num_levels,
                                start_layer, num_layers,
                                aux_usage, clear_supported);
}

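/* Pick the aux usage for rendering to a surface, given the render format,
 * whether blending is enabled, and whether aux was disabled for this draw
 * (e.g. because of a render-target/texture self-dependency).
 */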
enum isl_aux_usage
iris_resource_render_aux_usage(struct iris_context *ice,
                               struct iris_resource *res,
                               enum isl_format render_format,
                               bool blend_enabled,
                               bool draw_aux_disabled)
{
   struct iris_screen *screen = (void *) ice->ctx.screen;
   struct gen_device_info *devinfo = &screen->devinfo;

   if (draw_aux_disabled)
      return ISL_AUX_USAGE_NONE;

   switch (res->aux.usage) {
   case ISL_AUX_USAGE_MCS:
   case ISL_AUX_USAGE_MCS_CCS:
      return res->aux.usage;

   case ISL_AUX_USAGE_CCS_D:
   case ISL_AUX_USAGE_CCS_E:
      /* Gen9+ hardware technically supports non-0/1 clear colors with sRGB
       * formats. However, there are issues with blending where it doesn't
       * properly apply the sRGB curve to the clear color when blending.
       */
      if (devinfo->gen >= 9 && blend_enabled &&
          isl_format_is_srgb(render_format) &&
          !isl_color_value_is_zero_one(res->aux.clear_color, render_format))
         return ISL_AUX_USAGE_NONE;

      if (res->aux.usage == ISL_AUX_USAGE_CCS_E &&
          format_ccs_e_compat_with_resource(devinfo, res, render_format))
         return ISL_AUX_USAGE_CCS_E;

      /* Otherwise, we try to fall back to CCS_D */
      if (isl_format_supports_ccs_d(devinfo, render_format))
         return ISL_AUX_USAGE_CCS_D;

   default:
      return ISL_AUX_USAGE_NONE;
   }
}

void
iris_resource_prepare_render(struct iris_context *ice,
                             struct iris_batch *batch,
                             struct iris_resource *res, uint32_t level,
                             uint32_t start_layer, uint32_t layer_count,
                             enum isl_aux_usage aux_usage)
{
   iris_resource_prepare_access(ice, batch, res, level, 1, start_layer,
                                layer_count, aux_usage,
                                isl_aux_usage_has_fast_clears(aux_usage));
}

void
iris_resource_finish_render(struct iris_context *ice,
                            struct iris_resource *res, uint32_t level,
                            uint32_t start_layer, uint32_t layer_count,
                            enum isl_aux_usage aux_usage)
{
   iris_resource_finish_write(ice, res, level, start_layer, layer_count,
                              aux_usage);
}

void
iris_resource_prepare_depth(struct iris_context *ice,
                            struct iris_batch *batch,
                            struct iris_resource *res, uint32_t level,
                            uint32_t start_layer, uint32_t layer_count)
{
   iris_resource_prepare_access(ice, batch, res, level, 1, start_layer,
                                layer_count, res->aux.usage, !!res->aux.bo);
}

void
iris_resource_finish_depth(struct iris_context *ice,
                           struct iris_resource *res, uint32_t level,
                           uint32_t start_layer, uint32_t layer_count,
                           bool depth_written)
{
   if (depth_written) {
      iris_resource_finish_write(ice, res, level, start_layer, layer_count,
                                 res->aux.usage);
   }
}