/*
 * Copyright © 2017 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
/**
 * @file iris_resolve.c
 *
 * This file handles resolve tracking for main and auxiliary surfaces.
 *
 * It also handles our cache tracking.  We have sets for the render cache,
 * depth cache, and so on.  If a BO is in a cache's set, then it may have
 * data in that cache.  The helpers take care of emitting flushes for
 * render-to-texture, format reinterpretation issues, and other situations.
 */

#include "util/hash_table.h"
#include "iris_context.h"
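/*
 * Illustrative only (not part of the upstream file): a minimal sketch of how
 * the cache-tracking helpers defined below are meant to be paired around a
 * render-to-texture hazard.  `some_bo`, `format`, and `aux_usage` are
 * hypothetical stand-ins; the helper names are the real ones in this file.
 *
 *    // After drawing to a surface, record that its BO may now sit in the
 *    // render cache with a particular (format, aux usage) pair:
 *    iris_render_cache_add_bo(batch, some_bo, format, aux_usage);
 *
 *    // Before sampling that BO later in the same batch, emit any flush the
 *    // tracker deems necessary:
 *    iris_cache_flush_for_read(batch, some_bo);
 */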
/**
 * Disable auxiliary buffers if a renderbuffer is also bound as a texture
 * or shader image.  This causes a self-dependency, where both rendering
 * and sampling may concurrently read or write the CCS buffer, causing
 * incorrect pixels.
 */
disable_rb_aux_buffer(struct iris_context *ice,
                      bool *draw_aux_buffer_disabled,
                      struct iris_resource *tex_res,
                      unsigned min_level, unsigned num_levels,
   struct pipe_framebuffer_state *cso_fb = &ice->state.framebuffer;

   /* We only need to worry about color compression and fast clears. */
   if (tex_res->aux.usage != ISL_AUX_USAGE_CCS_D &&
       tex_res->aux.usage != ISL_AUX_USAGE_CCS_E)

   for (unsigned i = 0; i < cso_fb->nr_cbufs; i++) {
      struct iris_surface *surf = (void *) cso_fb->cbufs[i];

      struct iris_resource *rb_res = (void *) surf->base.texture;

      if (rb_res->bo == tex_res->bo &&
          surf->base.u.tex.level >= min_level &&
          surf->base.u.tex.level < min_level + num_levels) {
         found = draw_aux_buffer_disabled[i] = true;

                 "Disabling CCS because a renderbuffer is also bound %s.\n",
resolve_sampler_views(struct iris_context *ice,
                      struct iris_batch *batch,
                      struct iris_shader_state *shs,
                      const struct shader_info *info,
                      bool *draw_aux_buffer_disabled,
                      bool consider_framebuffer)
   uint32_t views = info ? (shs->bound_sampler_views & info->textures_used) : 0;

   unsigned astc5x5_wa_bits = 0; // XXX: actual tracking

      const int i = u_bit_scan(&views);
      struct iris_sampler_view *isv = shs->textures[i];
      struct iris_resource *res = (void *) isv->base.texture;

      if (res->base.target != PIPE_BUFFER) {
         if (consider_framebuffer) {
            disable_rb_aux_buffer(ice, draw_aux_buffer_disabled,
                                  res, isv->view.base_level, isv->view.levels,

         iris_resource_prepare_texture(ice, batch, res, isv->view.format,
                                       isv->view.base_level, isv->view.levels,
                                       isv->view.base_array_layer,

      iris_cache_flush_for_read(batch, res->bo);
resolve_image_views(struct iris_context *ice,
                    struct iris_batch *batch,
                    struct iris_shader_state *shs,
                    bool *draw_aux_buffer_disabled,
                    bool consider_framebuffer)
   /* TODO: Consider images used by program */
   uint32_t views = shs->bound_image_views;

      const int i = u_bit_scan(&views);
      struct iris_resource *res = (void *) shs->image[i].base.resource;

      if (res->base.target != PIPE_BUFFER) {
         if (consider_framebuffer) {
            disable_rb_aux_buffer(ice, draw_aux_buffer_disabled,
                                  res, 0, ~0, "as a shader image");

         iris_resource_prepare_image(ice, batch, res);

      iris_cache_flush_for_read(batch, res->bo);
/**
 * \brief Resolve buffers before drawing.
 *
 * Resolve the depth buffer's HiZ buffer, resolve the depth buffer of each
 * enabled depth texture, and flush the render cache for any dirty textures.
 */
iris_predraw_resolve_inputs(struct iris_context *ice,
                            struct iris_batch *batch,
                            bool *draw_aux_buffer_disabled,
                            gl_shader_stage stage,
                            bool consider_framebuffer)
   struct iris_shader_state *shs = &ice->state.shaders[stage];
   const struct shader_info *info = iris_get_shader_info(ice, stage);

   uint64_t dirty = (IRIS_DIRTY_BINDINGS_VS << stage) |
                    (consider_framebuffer ? IRIS_DIRTY_BINDINGS_FS : 0);

   if (ice->state.dirty & dirty) {
      resolve_sampler_views(ice, batch, shs, info, draw_aux_buffer_disabled,
                            consider_framebuffer);
      resolve_image_views(ice, batch, shs, draw_aux_buffer_disabled,
                          consider_framebuffer);

iris_predraw_resolve_framebuffer(struct iris_context *ice,
                                 struct iris_batch *batch,
                                 bool *draw_aux_buffer_disabled)
   struct pipe_framebuffer_state *cso_fb = &ice->state.framebuffer;

   if (ice->state.dirty & IRIS_DIRTY_DEPTH_BUFFER) {
      struct pipe_surface *zs_surf = cso_fb->zsbuf;

         struct iris_resource *z_res, *s_res;
         iris_get_depth_stencil_resources(zs_surf->texture, &z_res, &s_res);
         unsigned num_layers =
            zs_surf->u.tex.last_layer - zs_surf->u.tex.first_layer + 1;

            iris_resource_prepare_depth(ice, batch, z_res,
                                        zs_surf->u.tex.level,
                                        zs_surf->u.tex.first_layer,

            iris_cache_flush_for_depth(batch, z_res->bo);

            iris_cache_flush_for_depth(batch, s_res->bo);

   if (ice->state.dirty & (IRIS_DIRTY_BINDINGS_FS | IRIS_DIRTY_BLEND_STATE)) {
      for (unsigned i = 0; i < cso_fb->nr_cbufs; i++) {
         struct iris_surface *surf = (void *) cso_fb->cbufs[i];

         struct iris_resource *res = (void *) surf->base.texture;

         enum isl_aux_usage aux_usage =
            iris_resource_render_aux_usage(ice, res, surf->view.format,
                                           ice->state.blend_enables & (1u << i),
                                           draw_aux_buffer_disabled[i]);

         if (ice->state.draw_aux_usage[i] != aux_usage) {
            ice->state.draw_aux_usage[i] = aux_usage;
            /* XXX: Need to track which bindings to make dirty */
            ice->state.dirty |= IRIS_ALL_DIRTY_BINDINGS;

         iris_resource_prepare_render(ice, batch, res, surf->view.base_level,
                                      surf->view.base_array_layer,
                                      surf->view.array_len,

         iris_cache_flush_for_render(batch, res->bo, surf->view.format,
/**
 * \brief Call this after drawing to mark which buffers need resolving.
 *
 * If the depth buffer was written to and if it has an accompanying HiZ
 * buffer, then mark that it needs a depth resolve.
 *
 * If the color buffer is a multisample window system buffer, then
 * mark that it needs a downsample.
 *
 * Also mark any render targets which will be textured as needing a render
 * cache flush.
 */
iris_postdraw_update_resolve_tracking(struct iris_context *ice,
                                      struct iris_batch *batch)
   struct pipe_framebuffer_state *cso_fb = &ice->state.framebuffer;

   // XXX: front buffer drawing?

   bool may_have_resolved_depth =
      ice->state.dirty & (IRIS_DIRTY_DEPTH_BUFFER |
                          IRIS_DIRTY_WM_DEPTH_STENCIL);

   struct pipe_surface *zs_surf = cso_fb->zsbuf;
      struct iris_resource *z_res, *s_res;
      iris_get_depth_stencil_resources(zs_surf->texture, &z_res, &s_res);
      unsigned num_layers =
         zs_surf->u.tex.last_layer - zs_surf->u.tex.first_layer + 1;

         if (may_have_resolved_depth) {
            iris_resource_finish_depth(ice, z_res, zs_surf->u.tex.level,
                                       zs_surf->u.tex.first_layer, num_layers,
                                       ice->state.depth_writes_enabled);

         if (ice->state.depth_writes_enabled)
            iris_depth_cache_add_bo(batch, z_res->bo);

         if (may_have_resolved_depth) {
            iris_resource_finish_write(ice, s_res, zs_surf->u.tex.level,
                                       zs_surf->u.tex.first_layer, num_layers,

         if (ice->state.stencil_writes_enabled)
            iris_depth_cache_add_bo(batch, s_res->bo);

   bool may_have_resolved_color =
      ice->state.dirty & (IRIS_DIRTY_BINDINGS_FS | IRIS_DIRTY_BLEND_STATE);

   for (unsigned i = 0; i < cso_fb->nr_cbufs; i++) {
      struct iris_surface *surf = (void *) cso_fb->cbufs[i];

      struct iris_resource *res = (void *) surf->base.texture;
      enum isl_aux_usage aux_usage = ice->state.draw_aux_usage[i];

      iris_render_cache_add_bo(batch, res->bo, surf->view.format,

      if (may_have_resolved_color) {
         union pipe_surface_desc *desc = &surf->base.u;
         unsigned num_layers =
            desc->tex.last_layer - desc->tex.first_layer + 1;
         iris_resource_finish_render(ice, res, desc->tex.level,
                                     desc->tex.first_layer, num_layers,

/**
 * Clear the cache-tracking sets.
 */
iris_cache_sets_clear(struct iris_batch *batch)
   hash_table_foreach(batch->cache.render, render_entry)
      _mesa_hash_table_remove(batch->cache.render, render_entry);

   set_foreach(batch->cache.depth, depth_entry)
      _mesa_set_remove(batch->cache.depth, depth_entry);
/**
 * Emits an appropriate flush for a BO if it has been rendered to within the
 * same batchbuffer as a read that's about to be emitted.
 *
 * The GPU has separate, incoherent caches for the render cache and the
 * sampler cache, along with other caches.  Usually data in the different
 * caches don't interact (e.g. we don't render to our driver-generated
 * immediate constant data), but for render-to-texture in FBOs we definitely
 * do.  When a batchbuffer is flushed, the kernel will ensure that everything
 * necessary is flushed before another use of that BO, but for reuse from
 * different caches within a batchbuffer, it's all our responsibility.
 */
iris_flush_depth_and_render_caches(struct iris_batch *batch)
   iris_emit_pipe_control_flush(batch,
                                "cache tracker: render-to-texture",
                                PIPE_CONTROL_DEPTH_CACHE_FLUSH |
                                PIPE_CONTROL_RENDER_TARGET_FLUSH |
                                PIPE_CONTROL_CS_STALL);

   iris_emit_pipe_control_flush(batch,
                                "cache tracker: render-to-texture",
                                PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
                                PIPE_CONTROL_CONST_CACHE_INVALIDATE);

   iris_cache_sets_clear(batch);
iris_cache_flush_for_read(struct iris_batch *batch,
   if (_mesa_hash_table_search_pre_hashed(batch->cache.render, bo->hash, bo) ||
       _mesa_set_search_pre_hashed(batch->cache.depth, bo->hash, bo))
      iris_flush_depth_and_render_caches(batch);
format_aux_tuple(enum isl_format format, enum isl_aux_usage aux_usage)
   return (void *)(uintptr_t)((uint32_t)format << 8 | aux_usage);
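/*
 * Illustrative note (not from the upstream file): the pointer returned by
 * format_aux_tuple() is just a packed (format, aux usage) key, so for the
 * same format f, format_aux_tuple(f, ISL_AUX_USAGE_CCS_E) and
 * format_aux_tuple(f, ISL_AUX_USAGE_CCS_D) compare unequal.  That inequality
 * is what lets iris_cache_flush_for_render() below detect that a BO is being
 * re-rendered with a different format or aux usage and flush first.
 */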
iris_cache_flush_for_render(struct iris_batch *batch,
                            enum isl_format format,
                            enum isl_aux_usage aux_usage)
   if (_mesa_set_search_pre_hashed(batch->cache.depth, bo->hash, bo))
      iris_flush_depth_and_render_caches(batch);

   /* Check to see if this bo has been used by a previous rendering operation
    * but with a different format or aux usage.  If it has, flush the render
    * cache so we ensure that it's only in there with one format or aux usage
    * at a time.
    *
    * Even though it's not obvious, this can easily happen in practice.
    * Suppose a client is blending on a surface with sRGB encode enabled on
    * gen9.  This implies that you get AUX_USAGE_CCS_D at best.  If the client
    * then disables sRGB decode and continues blending we will flip on
    * AUX_USAGE_CCS_E without doing any sort of resolve in-between (this is
    * perfectly valid since CCS_E is a subset of CCS_D).  However, this means
    * that we have fragments in-flight which are rendering with UNORM+CCS_E
    * and other fragments in-flight with SRGB+CCS_D on the same surface at the
    * same time and the pixel scoreboard and color blender are trying to sort
    * it all out.  This ends badly (i.e. GPU hangs).
    *
    * To date, we have never observed GPU hangs or even corruption to be
    * associated with switching the format, only the aux usage.  However,
    * there are comments in various docs which indicate that the render cache
    * isn't 100% resilient to format changes.  We may as well be conservative
    * and flush on format changes too.  We can always relax this later if we
    * find it to be a performance problem.
    */
   struct hash_entry *entry =
      _mesa_hash_table_search_pre_hashed(batch->cache.render, bo->hash, bo);
   if (entry && entry->data != format_aux_tuple(format, aux_usage))
      iris_flush_depth_and_render_caches(batch);
iris_render_cache_add_bo(struct iris_batch *batch,
                         enum isl_format format,
                         enum isl_aux_usage aux_usage)
   struct hash_entry *entry =
      _mesa_hash_table_search_pre_hashed(batch->cache.render, bo->hash, bo);

      /* Otherwise, someone didn't do a flush_for_render and that would be
       * a bug.
       */
      assert(entry->data == format_aux_tuple(format, aux_usage));

   _mesa_hash_table_insert_pre_hashed(batch->cache.render, bo->hash, bo,
                                      format_aux_tuple(format, aux_usage));
iris_cache_flush_for_depth(struct iris_batch *batch,
   if (_mesa_hash_table_search_pre_hashed(batch->cache.render, bo->hash, bo))
      iris_flush_depth_and_render_caches(batch);

iris_depth_cache_add_bo(struct iris_batch *batch, struct iris_bo *bo)
   _mesa_set_add_pre_hashed(batch->cache.depth, bo->hash, bo);
iris_resolve_color(struct iris_context *ice,
                   struct iris_batch *batch,
                   struct iris_resource *res,
                   unsigned level, unsigned layer,
                   enum isl_aux_op resolve_op)
   //DBG("%s to mt %p level %u layer %u\n", __FUNCTION__, mt, level, layer);

   struct blorp_surf surf;
   iris_blorp_surf_for_resource(&ice->vtbl, &surf, &res->base, res->aux.usage,

   iris_batch_maybe_flush(batch, 1500);

   /* Ivybridge PRM Vol 2, Part 1, "11.7 MCS Buffer for Render Target(s)":
    *
    *    "Any transition from any value in {Clear, Render, Resolve} to a
    *     different value in {Clear, Render, Resolve} requires end of pipe
    *     synchronization."
    *
    * In other words, fast clear ops are not properly synchronized with
    * other drawing.  We need to use a PIPE_CONTROL to ensure that the
    * contents of the previous draw hit the render target before we resolve
    * and again afterwards to ensure that the resolve is complete before we
    * do any more regular drawing.
    */
   iris_emit_end_of_pipe_sync(batch, "color resolve: pre-flush",
                              PIPE_CONTROL_RENDER_TARGET_FLUSH);

   struct blorp_batch blorp_batch;
   blorp_batch_init(&ice->blorp, &blorp_batch, batch, 0);
   blorp_ccs_resolve(&blorp_batch, &surf, level, layer, 1,
                     isl_format_srgb_to_linear(res->surf.format),
   blorp_batch_finish(&blorp_batch);

   /* See comment above */
   iris_emit_end_of_pipe_sync(batch, "color resolve: post-flush",
                              PIPE_CONTROL_RENDER_TARGET_FLUSH);
iris_mcs_partial_resolve(struct iris_context *ice,
                         struct iris_batch *batch,
                         struct iris_resource *res,
                         uint32_t start_layer,
   //DBG("%s to mt %p layers %u-%u\n", __FUNCTION__, mt,
   //    start_layer, start_layer + num_layers - 1);

   assert(res->aux.usage == ISL_AUX_USAGE_MCS);

   struct blorp_surf surf;
   iris_blorp_surf_for_resource(&ice->vtbl, &surf, &res->base, res->aux.usage,

   struct blorp_batch blorp_batch;
   blorp_batch_init(&ice->blorp, &blorp_batch, batch, 0);
   blorp_mcs_partial_resolve(&blorp_batch, &surf,
                             isl_format_srgb_to_linear(res->surf.format),
                             start_layer, num_layers);
   blorp_batch_finish(&blorp_batch);
/**
 * Return true if the format that will be used to access the resource is
 * CCS_E-compatible with the resource's linear/non-sRGB format.
 *
 * Why use the linear format?  Well, although the resource may be specified
 * with an sRGB format, the usage of that color space/format can be toggled.
 * Since our HW tends to support more linear formats than sRGB ones, we use
 * this format variant to check for CCS_E compatibility.
 */
format_ccs_e_compat_with_resource(const struct gen_device_info *devinfo,
                                  const struct iris_resource *res,
                                  enum isl_format access_format)
   assert(res->aux.usage == ISL_AUX_USAGE_CCS_E);

   enum isl_format isl_format = isl_format_srgb_to_linear(res->surf.format);
   return isl_formats_are_ccs_e_compatible(devinfo, isl_format, access_format);
sample_with_hiz(const struct gen_device_info *devinfo,
                const struct iris_resource *res)
   if (!devinfo->has_sample_with_hiz)

   if (res->aux.usage != ISL_AUX_USAGE_HIZ)

   /* It seems the hardware won't fall back to the depth buffer if some of the
    * mipmap levels aren't available in the HiZ buffer.  So we need all levels
    * of the texture to be HiZ enabled.
    */
   for (unsigned level = 0; level < res->surf.levels; ++level) {
      if (!iris_resource_level_has_hiz(res, level))

   /* If compressed multisampling is enabled, then we use it for the auxiliary
    * buffer instead.
    *
    * From the BDW PRM (Volume 2d: Command Reference: Structures
    *                   RENDER_SURFACE_STATE.AuxiliarySurfaceMode):
    *
    *  "If this field is set to AUX_HIZ, Number of Multisamples must be
    *   MULTISAMPLECOUNT_1, and Surface Type cannot be SURFTYPE_3D."
    *
    * There is no such blurb for 1D textures, but there is sufficient evidence
    * that this is broken on SKL+.
    */
   // XXX: i965 disables this for arrays too, is that reasonable?
   return res->surf.samples == 1 && res->surf.dim == ISL_SURF_DIM_2D;
/**
 * Perform a HiZ or depth resolve operation.
 *
 * For an overview of HiZ ops, see the following sections of the Sandy Bridge
 * PRM, Volume 1, Part 2:
 *   - 7.5.3.1 Depth Buffer Clear
 *   - 7.5.3.2 Depth Buffer Resolve
 *   - 7.5.3.3 Hierarchical Depth Buffer Resolve
 */
iris_hiz_exec(struct iris_context *ice,
              struct iris_batch *batch,
              struct iris_resource *res,
              unsigned int level, unsigned int start_layer,
              unsigned int num_layers, enum isl_aux_op op,
              bool update_clear_depth)
   assert(iris_resource_level_has_hiz(res, level));
   assert(op != ISL_AUX_OP_NONE);
   UNUSED const char *name = NULL;

   case ISL_AUX_OP_FULL_RESOLVE:
      name = "depth resolve";
   case ISL_AUX_OP_AMBIGUATE:
      name = "hiz ambiguate";
   case ISL_AUX_OP_FAST_CLEAR:
      name = "depth clear";
   case ISL_AUX_OP_PARTIAL_RESOLVE:
   case ISL_AUX_OP_NONE:
      unreachable("Invalid HiZ op");
   //DBG("%s %s to mt %p level %d layers %d-%d\n",
   //    __func__, name, mt, level, start_layer, start_layer + num_layers - 1);

   /* The following stalls and flushes are only documented to be required
    * for HiZ clear operations.  However, they also seem to be required for
    * resolve operations.
    *
    * From the Ivybridge PRM, volume 2, "Depth Buffer Clear":
    *
    *    "If other rendering operations have preceded this clear, a
    *     PIPE_CONTROL with depth cache flush enabled, Depth Stall bit
    *     enabled must be issued before the rectangle primitive used for
    *     the depth buffer clear operation."
    *
    * Same applies for Gen8 and Gen9.
    *
    * In addition, from the Ivybridge PRM, volume 2, 1.10.4.1
    * PIPE_CONTROL, Depth Cache Flush Enable:
    *
    *    "This bit must not be set when Depth Stall Enable bit is set in
    *     this packet."
    *
    * This is confirmed to hold for real, Haswell gets immediate gpu hangs.
    *
    * Therefore issue two pipe control flushes, one for cache flush and
    * another for depth stall.
    */
   iris_emit_pipe_control_flush(batch,
                                "hiz op: pre-flushes (1/2)",
                                PIPE_CONTROL_DEPTH_CACHE_FLUSH |
                                PIPE_CONTROL_CS_STALL);
   iris_emit_pipe_control_flush(batch, "hiz op: pre-flushes (2/2)",
                                PIPE_CONTROL_DEPTH_STALL);

   assert(res->aux.usage == ISL_AUX_USAGE_HIZ && res->aux.bo);

   iris_batch_maybe_flush(batch, 1500);

   struct blorp_surf surf;
   iris_blorp_surf_for_resource(&ice->vtbl, &surf, &res->base,
                                ISL_AUX_USAGE_HIZ, level, true);

   struct blorp_batch blorp_batch;
   enum blorp_batch_flags flags = 0;
   flags |= update_clear_depth ? 0 : BLORP_BATCH_NO_UPDATE_CLEAR_COLOR;
   blorp_batch_init(&ice->blorp, &blorp_batch, batch, flags);
   blorp_hiz_op(&blorp_batch, &surf, level, start_layer, num_layers, op);
   blorp_batch_finish(&blorp_batch);

   /* The following stalls and flushes are only documented to be required
    * for HiZ clear operations.  However, they also seem to be required for
    * resolve operations.
    *
    * From the Broadwell PRM, volume 7, "Depth Buffer Clear":
    *
    *    "Depth buffer clear pass using any of the methods (WM_STATE,
    *     3DSTATE_WM or 3DSTATE_WM_HZ_OP) must be followed by a
    *     PIPE_CONTROL command with DEPTH_STALL bit and Depth FLUSH bits
    *     "set" before starting to render.  DepthStall and DepthFlush are
    *     not needed between consecutive depth clear passes nor is it
    *     required if the depth clear pass was done with
    *     'full_surf_clear' bit set in the 3DSTATE_WM_HZ_OP."
    *
    * TODO: As the spec says, this could be conditional.
    */
   iris_emit_pipe_control_flush(batch,
                                "hiz op: post flush",
                                PIPE_CONTROL_DEPTH_CACHE_FLUSH |
                                PIPE_CONTROL_DEPTH_STALL);
/**
 * Does the resource's slice have hiz enabled?
 */
iris_resource_level_has_hiz(const struct iris_resource *res, uint32_t level)
   iris_resource_check_level_layer(res, level, 0);
   return res->aux.has_hiz & 1 << level;
/** \brief Assert that the level and layer are valid for the resource. */
iris_resource_check_level_layer(UNUSED const struct iris_resource *res,
                                UNUSED uint32_t level, UNUSED uint32_t layer)
   assert(level < res->surf.levels);
   assert(layer < util_num_layers(&res->base, level));

static inline uint32_t
miptree_level_range_length(const struct iris_resource *res,
                           uint32_t start_level, uint32_t num_levels)
   assert(start_level < res->surf.levels);

   if (num_levels == INTEL_REMAINING_LAYERS)
      num_levels = res->surf.levels;

   /* Check for overflow */
   assert(start_level + num_levels >= start_level);
   assert(start_level + num_levels <= res->surf.levels);

static inline uint32_t
miptree_layer_range_length(const struct iris_resource *res, uint32_t level,
                           uint32_t start_layer, uint32_t num_layers)
   assert(level <= res->base.last_level);

   const uint32_t total_num_layers = iris_get_num_logical_layers(res, level);
   assert(start_layer < total_num_layers);
   if (num_layers == INTEL_REMAINING_LAYERS)
      num_layers = total_num_layers - start_layer;
   /* Check for overflow */
   assert(start_layer + num_layers >= start_layer);
   assert(start_layer + num_layers <= total_num_layers);

has_color_unresolved(const struct iris_resource *res,
                     unsigned start_level, unsigned num_levels,
                     unsigned start_layer, unsigned num_layers)
   /* Clamp the level range to fit the resource */
   num_levels = miptree_level_range_length(res, start_level, num_levels);

   for (uint32_t l = 0; l < num_levels; l++) {
      const uint32_t level = start_level + l;
      const uint32_t level_layers =
         miptree_layer_range_length(res, level, start_layer, num_layers);
      for (unsigned a = 0; a < level_layers; a++) {
         enum isl_aux_state aux_state =
            iris_resource_get_aux_state(res, level, start_layer + a);
         assert(aux_state != ISL_AUX_STATE_AUX_INVALID);
         if (aux_state != ISL_AUX_STATE_PASS_THROUGH)
static enum isl_aux_op
get_ccs_d_resolve_op(enum isl_aux_state aux_state,
                     enum isl_aux_usage aux_usage,
                     bool fast_clear_supported)
   assert(aux_usage == ISL_AUX_USAGE_NONE || aux_usage == ISL_AUX_USAGE_CCS_D);

   const bool ccs_supported = aux_usage == ISL_AUX_USAGE_CCS_D;

   assert(ccs_supported == fast_clear_supported);

   case ISL_AUX_STATE_CLEAR:
   case ISL_AUX_STATE_PARTIAL_CLEAR:
         return ISL_AUX_OP_FULL_RESOLVE;
         return ISL_AUX_OP_NONE;

   case ISL_AUX_STATE_PASS_THROUGH:
      return ISL_AUX_OP_NONE;

   case ISL_AUX_STATE_RESOLVED:
   case ISL_AUX_STATE_AUX_INVALID:
   case ISL_AUX_STATE_COMPRESSED_CLEAR:
   case ISL_AUX_STATE_COMPRESSED_NO_CLEAR:
   unreachable("Invalid aux state for CCS_D");

static enum isl_aux_op
get_ccs_e_resolve_op(enum isl_aux_state aux_state,
                     enum isl_aux_usage aux_usage,
                     bool fast_clear_supported)
   /* CCS_E surfaces can be accessed as CCS_D if we're careful. */
   assert(aux_usage == ISL_AUX_USAGE_NONE ||
          aux_usage == ISL_AUX_USAGE_CCS_D ||
          aux_usage == ISL_AUX_USAGE_CCS_E);

   if (aux_usage == ISL_AUX_USAGE_CCS_D)
      assert(fast_clear_supported);

   case ISL_AUX_STATE_CLEAR:
   case ISL_AUX_STATE_PARTIAL_CLEAR:
      if (fast_clear_supported)
         return ISL_AUX_OP_NONE;
      else if (aux_usage == ISL_AUX_USAGE_CCS_E)
         return ISL_AUX_OP_PARTIAL_RESOLVE;
         return ISL_AUX_OP_FULL_RESOLVE;

   case ISL_AUX_STATE_COMPRESSED_CLEAR:
      if (aux_usage != ISL_AUX_USAGE_CCS_E)
         return ISL_AUX_OP_FULL_RESOLVE;
      else if (!fast_clear_supported)
         return ISL_AUX_OP_PARTIAL_RESOLVE;
         return ISL_AUX_OP_NONE;

   case ISL_AUX_STATE_COMPRESSED_NO_CLEAR:
      if (aux_usage != ISL_AUX_USAGE_CCS_E)
         return ISL_AUX_OP_FULL_RESOLVE;
         return ISL_AUX_OP_NONE;

   case ISL_AUX_STATE_PASS_THROUGH:
      return ISL_AUX_OP_NONE;

   case ISL_AUX_STATE_RESOLVED:
   case ISL_AUX_STATE_AUX_INVALID:
   unreachable("Invalid aux state for CCS_E");
iris_resource_prepare_ccs_access(struct iris_context *ice,
                                 struct iris_batch *batch,
                                 struct iris_resource *res,
                                 uint32_t level, uint32_t layer,
                                 enum isl_aux_usage aux_usage,
                                 bool fast_clear_supported)
   enum isl_aux_state aux_state = iris_resource_get_aux_state(res, level, layer);

   enum isl_aux_op resolve_op;
   if (res->aux.usage == ISL_AUX_USAGE_CCS_E) {
      resolve_op = get_ccs_e_resolve_op(aux_state, aux_usage,
                                        fast_clear_supported);
      assert(res->aux.usage == ISL_AUX_USAGE_CCS_D);
      resolve_op = get_ccs_d_resolve_op(aux_state, aux_usage,
                                        fast_clear_supported);

   if (resolve_op != ISL_AUX_OP_NONE) {
      iris_resolve_color(ice, batch, res, level, layer, resolve_op);

      switch (resolve_op) {
      case ISL_AUX_OP_FULL_RESOLVE:
         /* The CCS full resolve operation destroys the CCS and sets it to the
          * pass-through state.  (You can also think of this as being both a
          * resolve and an ambiguate in one operation.)
          */
         iris_resource_set_aux_state(ice, res, level, layer, 1,
                                     ISL_AUX_STATE_PASS_THROUGH);

      case ISL_AUX_OP_PARTIAL_RESOLVE:
         iris_resource_set_aux_state(ice, res, level, layer, 1,
                                     ISL_AUX_STATE_COMPRESSED_NO_CLEAR);

         unreachable("Invalid resolve op");
iris_resource_finish_ccs_write(struct iris_context *ice,
                               struct iris_resource *res,
                               uint32_t level, uint32_t layer,
                               enum isl_aux_usage aux_usage)
   assert(aux_usage == ISL_AUX_USAGE_NONE ||
          aux_usage == ISL_AUX_USAGE_CCS_D ||
          aux_usage == ISL_AUX_USAGE_CCS_E);

   enum isl_aux_state aux_state =
      iris_resource_get_aux_state(res, level, layer);

   if (res->aux.usage == ISL_AUX_USAGE_CCS_E) {
      case ISL_AUX_STATE_CLEAR:
      case ISL_AUX_STATE_PARTIAL_CLEAR:
         assert(aux_usage == ISL_AUX_USAGE_CCS_E ||
                aux_usage == ISL_AUX_USAGE_CCS_D);

         if (aux_usage == ISL_AUX_USAGE_CCS_E) {
            iris_resource_set_aux_state(ice, res, level, layer, 1,
                                        ISL_AUX_STATE_COMPRESSED_CLEAR);
         } else if (aux_state != ISL_AUX_STATE_PARTIAL_CLEAR) {
            iris_resource_set_aux_state(ice, res, level, layer, 1,
                                        ISL_AUX_STATE_PARTIAL_CLEAR);

      case ISL_AUX_STATE_COMPRESSED_CLEAR:
      case ISL_AUX_STATE_COMPRESSED_NO_CLEAR:
         assert(aux_usage == ISL_AUX_USAGE_CCS_E);
         break; /* Nothing to do */

      case ISL_AUX_STATE_PASS_THROUGH:
         if (aux_usage == ISL_AUX_USAGE_CCS_E) {
            iris_resource_set_aux_state(ice, res, level, layer, 1,
                                        ISL_AUX_STATE_COMPRESSED_NO_CLEAR);

      case ISL_AUX_STATE_RESOLVED:
      case ISL_AUX_STATE_AUX_INVALID:
         unreachable("Invalid aux state for CCS_E");

      assert(res->aux.usage == ISL_AUX_USAGE_CCS_D);
      /* CCS_D is a bit simpler */
      case ISL_AUX_STATE_CLEAR:
         assert(aux_usage == ISL_AUX_USAGE_CCS_D);
         iris_resource_set_aux_state(ice, res, level, layer, 1,
                                     ISL_AUX_STATE_PARTIAL_CLEAR);

      case ISL_AUX_STATE_PARTIAL_CLEAR:
         assert(aux_usage == ISL_AUX_USAGE_CCS_D);
         break; /* Nothing to do */

      case ISL_AUX_STATE_PASS_THROUGH:

      case ISL_AUX_STATE_COMPRESSED_CLEAR:
      case ISL_AUX_STATE_COMPRESSED_NO_CLEAR:
      case ISL_AUX_STATE_RESOLVED:
      case ISL_AUX_STATE_AUX_INVALID:
         unreachable("Invalid aux state for CCS_D");
iris_resource_prepare_mcs_access(struct iris_context *ice,
                                 struct iris_batch *batch,
                                 struct iris_resource *res,
                                 enum isl_aux_usage aux_usage,
                                 bool fast_clear_supported)
   assert(aux_usage == ISL_AUX_USAGE_MCS);

   switch (iris_resource_get_aux_state(res, 0, layer)) {
   case ISL_AUX_STATE_CLEAR:
   case ISL_AUX_STATE_COMPRESSED_CLEAR:
      if (!fast_clear_supported) {
         iris_mcs_partial_resolve(ice, batch, res, layer, 1);
         iris_resource_set_aux_state(ice, res, 0, layer, 1,
                                     ISL_AUX_STATE_COMPRESSED_NO_CLEAR);

   case ISL_AUX_STATE_COMPRESSED_NO_CLEAR:
      break; /* Nothing to do */

   case ISL_AUX_STATE_RESOLVED:
   case ISL_AUX_STATE_PASS_THROUGH:
   case ISL_AUX_STATE_AUX_INVALID:
   case ISL_AUX_STATE_PARTIAL_CLEAR:
      unreachable("Invalid aux state for MCS");

iris_resource_finish_mcs_write(struct iris_context *ice,
                               struct iris_resource *res,
                               enum isl_aux_usage aux_usage)
   assert(aux_usage == ISL_AUX_USAGE_MCS);

   switch (iris_resource_get_aux_state(res, 0, layer)) {
   case ISL_AUX_STATE_CLEAR:
      iris_resource_set_aux_state(ice, res, 0, layer, 1,
                                  ISL_AUX_STATE_COMPRESSED_CLEAR);

   case ISL_AUX_STATE_COMPRESSED_CLEAR:
   case ISL_AUX_STATE_COMPRESSED_NO_CLEAR:
      break; /* Nothing to do */

   case ISL_AUX_STATE_RESOLVED:
   case ISL_AUX_STATE_PASS_THROUGH:
   case ISL_AUX_STATE_AUX_INVALID:
   case ISL_AUX_STATE_PARTIAL_CLEAR:
      unreachable("Invalid aux state for MCS");
iris_resource_prepare_hiz_access(struct iris_context *ice,
                                 struct iris_batch *batch,
                                 struct iris_resource *res,
                                 uint32_t level, uint32_t layer,
                                 enum isl_aux_usage aux_usage,
                                 bool fast_clear_supported)
   assert(aux_usage == ISL_AUX_USAGE_NONE || aux_usage == ISL_AUX_USAGE_HIZ);

   enum isl_aux_op hiz_op = ISL_AUX_OP_NONE;
   switch (iris_resource_get_aux_state(res, level, layer)) {
   case ISL_AUX_STATE_CLEAR:
   case ISL_AUX_STATE_COMPRESSED_CLEAR:
      if (aux_usage != ISL_AUX_USAGE_HIZ || !fast_clear_supported)
         hiz_op = ISL_AUX_OP_FULL_RESOLVE;

   case ISL_AUX_STATE_COMPRESSED_NO_CLEAR:
      if (aux_usage != ISL_AUX_USAGE_HIZ)
         hiz_op = ISL_AUX_OP_FULL_RESOLVE;

   case ISL_AUX_STATE_PASS_THROUGH:
   case ISL_AUX_STATE_RESOLVED:

   case ISL_AUX_STATE_AUX_INVALID:
      if (aux_usage == ISL_AUX_USAGE_HIZ)
         hiz_op = ISL_AUX_OP_AMBIGUATE;

   case ISL_AUX_STATE_PARTIAL_CLEAR:
      unreachable("Invalid HiZ state");

   if (hiz_op != ISL_AUX_OP_NONE) {
      iris_hiz_exec(ice, batch, res, level, layer, 1, hiz_op, false);

      case ISL_AUX_OP_FULL_RESOLVE:
         iris_resource_set_aux_state(ice, res, level, layer, 1,
                                     ISL_AUX_STATE_RESOLVED);

      case ISL_AUX_OP_AMBIGUATE:
         /* The HiZ resolve operation is actually an ambiguate */
         iris_resource_set_aux_state(ice, res, level, layer, 1,
                                     ISL_AUX_STATE_PASS_THROUGH);

         unreachable("Invalid HiZ op");

iris_resource_finish_hiz_write(struct iris_context *ice,
                               struct iris_resource *res,
                               uint32_t level, uint32_t layer,
                               enum isl_aux_usage aux_usage)
   assert(aux_usage == ISL_AUX_USAGE_NONE || aux_usage == ISL_AUX_USAGE_HIZ);

   switch (iris_resource_get_aux_state(res, level, layer)) {
   case ISL_AUX_STATE_CLEAR:
      assert(aux_usage == ISL_AUX_USAGE_HIZ);
      iris_resource_set_aux_state(ice, res, level, layer, 1,
                                  ISL_AUX_STATE_COMPRESSED_CLEAR);

   case ISL_AUX_STATE_COMPRESSED_NO_CLEAR:
   case ISL_AUX_STATE_COMPRESSED_CLEAR:
      assert(aux_usage == ISL_AUX_USAGE_HIZ);
      break; /* Nothing to do */

   case ISL_AUX_STATE_RESOLVED:
      if (aux_usage == ISL_AUX_USAGE_HIZ) {
         iris_resource_set_aux_state(ice, res, level, layer, 1,
                                     ISL_AUX_STATE_COMPRESSED_NO_CLEAR);
         iris_resource_set_aux_state(ice, res, level, layer, 1,
                                     ISL_AUX_STATE_AUX_INVALID);

   case ISL_AUX_STATE_PASS_THROUGH:
      if (aux_usage == ISL_AUX_USAGE_HIZ) {
         iris_resource_set_aux_state(ice, res, level, layer, 1,
                                     ISL_AUX_STATE_COMPRESSED_NO_CLEAR);

   case ISL_AUX_STATE_AUX_INVALID:
      assert(aux_usage != ISL_AUX_USAGE_HIZ);

   case ISL_AUX_STATE_PARTIAL_CLEAR:
      unreachable("Invalid HiZ state");
iris_resource_prepare_access(struct iris_context *ice,
                             struct iris_batch *batch,
                             struct iris_resource *res,
                             uint32_t start_level, uint32_t num_levels,
                             uint32_t start_layer, uint32_t num_layers,
                             enum isl_aux_usage aux_usage,
                             bool fast_clear_supported)
   num_levels = miptree_level_range_length(res, start_level, num_levels);

   switch (res->aux.usage) {
   case ISL_AUX_USAGE_NONE:

   case ISL_AUX_USAGE_MCS:
      assert(start_level == 0 && num_levels == 1);
      const uint32_t level_layers =
         miptree_layer_range_length(res, 0, start_layer, num_layers);
      for (uint32_t a = 0; a < level_layers; a++) {
         iris_resource_prepare_mcs_access(ice, batch, res, start_layer + a,
                                          aux_usage, fast_clear_supported);

   case ISL_AUX_USAGE_CCS_D:
   case ISL_AUX_USAGE_CCS_E:
      for (uint32_t l = 0; l < num_levels; l++) {
         const uint32_t level = start_level + l;
         const uint32_t level_layers =
            miptree_layer_range_length(res, level, start_layer, num_layers);
         for (uint32_t a = 0; a < level_layers; a++) {
            iris_resource_prepare_ccs_access(ice, batch, res, level,
                                             aux_usage, fast_clear_supported);

   case ISL_AUX_USAGE_HIZ:
      for (uint32_t l = 0; l < num_levels; l++) {
         const uint32_t level = start_level + l;
         if (!iris_resource_level_has_hiz(res, level))

         const uint32_t level_layers =
            miptree_layer_range_length(res, level, start_layer, num_layers);
         for (uint32_t a = 0; a < level_layers; a++) {
            iris_resource_prepare_hiz_access(ice, batch, res, level,
                                             start_layer + a, aux_usage,
                                             fast_clear_supported);

      unreachable("Invalid aux usage");
iris_resource_finish_write(struct iris_context *ice,
                           struct iris_resource *res, uint32_t level,
                           uint32_t start_layer, uint32_t num_layers,
                           enum isl_aux_usage aux_usage)
   num_layers = miptree_layer_range_length(res, level, start_layer, num_layers);

   switch (res->aux.usage) {
   case ISL_AUX_USAGE_NONE:

   case ISL_AUX_USAGE_MCS:
      for (uint32_t a = 0; a < num_layers; a++) {
         iris_resource_finish_mcs_write(ice, res, start_layer + a,

   case ISL_AUX_USAGE_CCS_D:
   case ISL_AUX_USAGE_CCS_E:
      for (uint32_t a = 0; a < num_layers; a++) {
         iris_resource_finish_ccs_write(ice, res, level, start_layer + a,

   case ISL_AUX_USAGE_HIZ:
      if (!iris_resource_level_has_hiz(res, level))

      for (uint32_t a = 0; a < num_layers; a++) {
         iris_resource_finish_hiz_write(ice, res, level, start_layer + a,

      unreachable("Invalid aux usage");
iris_resource_get_aux_state(const struct iris_resource *res,
                            uint32_t level, uint32_t layer)
   iris_resource_check_level_layer(res, level, layer);

   if (res->surf.usage & ISL_SURF_USAGE_DEPTH_BIT) {
      assert(iris_resource_level_has_hiz(res, level));
   } else if (res->surf.usage & ISL_SURF_USAGE_STENCIL_BIT) {
      unreachable("Cannot get aux state for stencil");
      assert(res->surf.samples == 1 ||
             res->surf.msaa_layout == ISL_MSAA_LAYOUT_ARRAY);

   return res->aux.state[level][layer];

iris_resource_set_aux_state(struct iris_context *ice,
                            struct iris_resource *res, uint32_t level,
                            uint32_t start_layer, uint32_t num_layers,
                            enum isl_aux_state aux_state)
   num_layers = miptree_layer_range_length(res, level, start_layer, num_layers);

   if (res->surf.usage & ISL_SURF_USAGE_DEPTH_BIT) {
      assert(iris_resource_level_has_hiz(res, level));
   } else if (res->surf.usage & ISL_SURF_USAGE_STENCIL_BIT) {
      unreachable("Cannot set aux state for stencil");
      assert(res->surf.samples == 1 ||
             res->surf.msaa_layout == ISL_MSAA_LAYOUT_ARRAY);

   for (unsigned a = 0; a < num_layers; a++) {
      if (res->aux.state[level][start_layer + a] != aux_state) {
         res->aux.state[level][start_layer + a] = aux_state;
         /* XXX: Need to track which bindings to make dirty */
         ice->state.dirty |= IRIS_ALL_DIRTY_BINDINGS;
/* On Gen9 color buffers may be compressed by the hardware (lossless
 * compression).  There are, however, format restrictions and care needs to be
 * taken that the sampler engine is capable of re-interpreting a buffer with
 * a format different from the one the buffer was originally written with.
 *
 * For example, SRGB formats are not compressible and the sampler engine isn't
 * capable of treating RGBA_UNORM as SRGB_ALPHA.  In such a case the underlying
 * color buffer needs to be resolved so that the sampling surface can be
 * sampled as non-compressed (i.e., without the auxiliary MCS buffer being
 * set).
 */
can_texture_with_ccs(const struct gen_device_info *devinfo,
                     struct pipe_debug_callback *dbg,
                     const struct iris_resource *res,
                     enum isl_format view_format)
   if (res->aux.usage != ISL_AUX_USAGE_CCS_E)

   if (!format_ccs_e_compat_with_resource(devinfo, res, view_format)) {
      const struct isl_format_layout *res_fmtl =
         isl_format_get_layout(res->surf.format);
      const struct isl_format_layout *view_fmtl =
         isl_format_get_layout(view_format);

      perf_debug(dbg, "Incompatible sampling format (%s) for CCS (%s)\n",
                 view_fmtl->name, res_fmtl->name);
iris_resource_texture_aux_usage(struct iris_context *ice,
                                const struct iris_resource *res,
                                enum isl_format view_format,
                                enum gen9_astc5x5_wa_tex_type astc5x5_wa_bits)
   struct iris_screen *screen = (void *) ice->ctx.screen;
   struct gen_device_info *devinfo = &screen->devinfo;

   assert(devinfo->gen == 9 || astc5x5_wa_bits == 0);

   /* On gen9, ASTC 5x5 textures cannot live in the sampler cache alongside
    * CCS or HiZ compressed textures.  See gen9_apply_astc5x5_wa_flush() for
    * details.
    */
   if ((astc5x5_wa_bits & GEN9_ASTC5X5_WA_TEX_TYPE_ASTC5x5) &&
       res->aux.usage != ISL_AUX_USAGE_MCS)
      return ISL_AUX_USAGE_NONE;

   switch (res->aux.usage) {
   case ISL_AUX_USAGE_HIZ:
      if (sample_with_hiz(devinfo, res))
         return ISL_AUX_USAGE_HIZ;

   case ISL_AUX_USAGE_MCS:
      return ISL_AUX_USAGE_MCS;

   case ISL_AUX_USAGE_CCS_D:
   case ISL_AUX_USAGE_CCS_E:
      /* If we don't have any unresolved color, report an aux usage of
       * ISL_AUX_USAGE_NONE.  This way, texturing won't even look at the
       * aux surface and we can save some bandwidth.
       */
      if (!has_color_unresolved(res, 0, INTEL_REMAINING_LEVELS,
                                0, INTEL_REMAINING_LAYERS))
         return ISL_AUX_USAGE_NONE;

      if (can_texture_with_ccs(devinfo, &ice->dbg, res, view_format))
         return ISL_AUX_USAGE_CCS_E;

   return ISL_AUX_USAGE_NONE;
isl_formats_are_fast_clear_compatible(enum isl_format a, enum isl_format b)
   /* On gen8 and earlier, the hardware was only capable of handling 0/1 clear
    * values so sRGB curve application was a no-op for all fast-clearable
    * formats.
    *
    * On gen9+, the hardware supports arbitrary clear values.  For sRGB clear
    * values, the hardware interprets the floats, not as what would be
    * returned from the sampler (or written by the shader), but as being
    * between format conversion and sRGB curve application.  This means that
    * we can switch between sRGB and UNORM without having to whack the clear
    * color.
    */
   return isl_format_srgb_to_linear(a) == isl_format_srgb_to_linear(b);
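/*
 * Illustrative note (not from the upstream file): isl_format_srgb_to_linear()
 * maps an sRGB format to its linear counterpart, so e.g.
 * ISL_FORMAT_B8G8R8A8_UNORM_SRGB and ISL_FORMAT_B8G8R8A8_UNORM both map to
 * ISL_FORMAT_B8G8R8A8_UNORM and therefore compare as fast-clear compatible
 * here, while formats with different channel layouts do not.
 */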
iris_resource_prepare_texture(struct iris_context *ice,
                              struct iris_batch *batch,
                              struct iris_resource *res,
                              enum isl_format view_format,
                              uint32_t start_level, uint32_t num_levels,
                              uint32_t start_layer, uint32_t num_layers,
                              enum gen9_astc5x5_wa_tex_type astc5x5_wa_bits)
   enum isl_aux_usage aux_usage =
      iris_resource_texture_aux_usage(ice, res, view_format, astc5x5_wa_bits);

   bool clear_supported = aux_usage != ISL_AUX_USAGE_NONE;

   /* Clear color is specified as ints or floats and the conversion is done by
    * the sampler.  If we have a texture view, we would have to perform the
    * clear color conversion manually.  Just disable clear color.
    */
   if (!isl_formats_are_fast_clear_compatible(res->surf.format, view_format))
      clear_supported = false;

   iris_resource_prepare_access(ice, batch, res, start_level, num_levels,
                                start_layer, num_layers,
                                aux_usage, clear_supported);

iris_resource_prepare_image(struct iris_context *ice,
                            struct iris_batch *batch,
                            struct iris_resource *res)
   /* The data port doesn't understand any compression */
   iris_resource_prepare_access(ice, batch, res, 0, INTEL_REMAINING_LEVELS,
                                0, INTEL_REMAINING_LAYERS,
                                ISL_AUX_USAGE_NONE, false);
iris_resource_render_aux_usage(struct iris_context *ice,
                               struct iris_resource *res,
                               enum isl_format render_format,
                               bool draw_aux_disabled)
   struct iris_screen *screen = (void *) ice->ctx.screen;
   struct gen_device_info *devinfo = &screen->devinfo;

   if (draw_aux_disabled)
      return ISL_AUX_USAGE_NONE;

   switch (res->aux.usage) {
   case ISL_AUX_USAGE_MCS:
      return ISL_AUX_USAGE_MCS;

   case ISL_AUX_USAGE_CCS_D:
   case ISL_AUX_USAGE_CCS_E:
      /* Gen9+ hardware technically supports non-0/1 clear colors with sRGB
       * formats.  However, there are issues with blending where it doesn't
       * properly apply the sRGB curve to the clear color when blending.
       */
      if (devinfo->gen >= 9 && blend_enabled &&
          isl_format_is_srgb(render_format) &&
          !isl_color_value_is_zero_one(res->aux.clear_color, render_format))
         return ISL_AUX_USAGE_NONE;

      if (res->aux.usage == ISL_AUX_USAGE_CCS_E &&
          format_ccs_e_compat_with_resource(devinfo, res, render_format))
         return ISL_AUX_USAGE_CCS_E;

      /* Otherwise, we have to fall back to CCS_D */
      return ISL_AUX_USAGE_CCS_D;

      return ISL_AUX_USAGE_NONE;
iris_resource_prepare_render(struct iris_context *ice,
                             struct iris_batch *batch,
                             struct iris_resource *res, uint32_t level,
                             uint32_t start_layer, uint32_t layer_count,
                             enum isl_aux_usage aux_usage)
   iris_resource_prepare_access(ice, batch, res, level, 1, start_layer,
                                layer_count, aux_usage,
                                aux_usage != ISL_AUX_USAGE_NONE);

iris_resource_finish_render(struct iris_context *ice,
                            struct iris_resource *res, uint32_t level,
                            uint32_t start_layer, uint32_t layer_count,
                            enum isl_aux_usage aux_usage)
   iris_resource_finish_write(ice, res, level, start_layer, layer_count,

iris_resource_prepare_depth(struct iris_context *ice,
                            struct iris_batch *batch,
                            struct iris_resource *res, uint32_t level,
                            uint32_t start_layer, uint32_t layer_count)
   iris_resource_prepare_access(ice, batch, res, level, 1, start_layer,
                                layer_count, res->aux.usage, !!res->aux.bo);

iris_resource_finish_depth(struct iris_context *ice,
                           struct iris_resource *res, uint32_t level,
                           uint32_t start_layer, uint32_t layer_count,
   if (depth_written) {
      iris_resource_finish_write(ice, res, level, start_layer, layer_count,