/*
 * Copyright © 2017 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
/**
 * @file iris_resolve.c
 *
 * This file handles resolve tracking for main and auxiliary surfaces.
 *
 * It also handles our cache tracking.  We have sets for the render cache,
 * depth cache, and so on.  If a BO is in a cache's set, then it may have
 * data in that cache.  The helpers take care of emitting flushes for
 * render-to-texture, format reinterpretation issues, and other situations.
 */
#include "util/hash_table.h"
#include "util/set.h"
#include "iris_context.h"
/**
 * Disable auxiliary buffers if a renderbuffer is also bound as a texture
 * or shader image.  This causes a self-dependency, where both rendering
 * and sampling may concurrently read or write the CCS buffer, causing
 * incorrect pixels.
 */
static bool
disable_rb_aux_buffer(struct iris_context *ice,
                      bool *draw_aux_buffer_disabled,
                      struct iris_resource *tex_res,
                      unsigned min_level, unsigned num_levels,
                      const char *usage)
{
   struct pipe_framebuffer_state *cso_fb = &ice->state.framebuffer;
   bool found = false;

   /* We only need to worry about color compression and fast clears. */
   if (tex_res->aux.usage != ISL_AUX_USAGE_CCS_D &&
       tex_res->aux.usage != ISL_AUX_USAGE_CCS_E)
      return false;

   for (unsigned i = 0; i < cso_fb->nr_cbufs; i++) {
      struct iris_surface *surf = (void *) cso_fb->cbufs[i];
      if (!surf)
         continue;

      struct iris_resource *rb_res = (void *) surf->base.texture;

      if (rb_res->bo == tex_res->bo &&
          surf->base.u.tex.level >= min_level &&
          surf->base.u.tex.level < min_level + num_levels) {
         found = draw_aux_buffer_disabled[i] = true;
      }
   }

   if (found) {
      perf_debug(&ice->dbg,
                 "Disabling CCS because a renderbuffer is also bound %s.\n",
                 usage);
   }

   return found;
}
static void
resolve_sampler_views(struct iris_context *ice,
                      struct iris_batch *batch,
                      struct iris_shader_state *shs,
                      bool *draw_aux_buffer_disabled,
                      bool consider_framebuffer)
{
   uint32_t views = shs->bound_sampler_views;

   unsigned astc5x5_wa_bits = 0; // XXX: actual tracking

   while (views) {
      const int i = u_bit_scan(&views);
      struct iris_sampler_view *isv = shs->textures[i];
      struct iris_resource *res = (void *) isv->base.texture;

      if (res->base.target != PIPE_BUFFER) {
         if (consider_framebuffer) {
            disable_rb_aux_buffer(ice, draw_aux_buffer_disabled,
                                  res, isv->view.base_level, isv->view.levels,
                                  "for sampling");
         }

         iris_resource_prepare_texture(ice, batch, res, isv->view.format,
                                       isv->view.base_level, isv->view.levels,
                                       isv->view.base_array_layer,
                                       isv->view.array_len,
                                       astc5x5_wa_bits);
      }

      iris_cache_flush_for_read(batch, res->bo);
   }
}
static void
resolve_image_views(struct iris_context *ice,
                    struct iris_batch *batch,
                    struct iris_shader_state *shs,
                    bool *draw_aux_buffer_disabled,
                    bool consider_framebuffer)
{
   uint32_t views = shs->bound_image_views;

   while (views) {
      const int i = u_bit_scan(&views);
      struct iris_resource *res = (void *) shs->image[i].res;

      if (res->base.target != PIPE_BUFFER) {
         if (consider_framebuffer) {
            disable_rb_aux_buffer(ice, draw_aux_buffer_disabled,
                                  res, 0, ~0, "as a shader image");
         }

         iris_resource_prepare_image(ice, batch, res);
      }

      iris_cache_flush_for_read(batch, res->bo);
   }
}
/**
 * \brief Resolve buffers before drawing.
 *
 * Resolve the depth buffer's HiZ buffer, resolve the depth buffer of each
 * enabled depth texture, and flush the render cache for any dirty textures.
 */
void
iris_predraw_resolve_inputs(struct iris_context *ice,
                            struct iris_batch *batch,
                            struct iris_shader_state *shs,
                            bool *draw_aux_buffer_disabled,
                            bool consider_framebuffer)
{
   resolve_sampler_views(ice, batch, shs, draw_aux_buffer_disabled,
                         consider_framebuffer);
   resolve_image_views(ice, batch, shs, draw_aux_buffer_disabled,
                       consider_framebuffer);
}
void
iris_predraw_resolve_framebuffer(struct iris_context *ice,
                                 struct iris_batch *batch,
                                 bool *draw_aux_buffer_disabled)
{
   struct pipe_framebuffer_state *cso_fb = &ice->state.framebuffer;
   struct pipe_surface *zs_surf = cso_fb->zsbuf;

   if (zs_surf) {
      struct iris_resource *z_res, *s_res;
      iris_get_depth_stencil_resources(zs_surf->texture, &z_res, &s_res);
      unsigned num_layers =
         zs_surf->u.tex.last_layer - zs_surf->u.tex.first_layer + 1;

      if (z_res) {
         iris_resource_prepare_depth(ice, batch, z_res, zs_surf->u.tex.level,
                                     zs_surf->u.tex.first_layer, num_layers);
      }
   }

   for (unsigned i = 0; i < cso_fb->nr_cbufs; i++) {
      struct iris_surface *surf = (void *) cso_fb->cbufs[i];
      if (!surf)
         continue;

      struct iris_resource *res = (void *) surf->base.texture;

      enum isl_aux_usage aux_usage =
         iris_resource_render_aux_usage(ice, res, surf->view.format,
                                        ice->state.blend_enables & (1u << i),
                                        draw_aux_buffer_disabled[i]);

      if (ice->state.draw_aux_usage[i] != aux_usage) {
         ice->state.draw_aux_usage[i] = aux_usage;
         /* XXX: Need to track which bindings to make dirty */
         ice->state.dirty |= IRIS_ALL_DIRTY_BINDINGS;
      }

      iris_resource_prepare_render(ice, batch, res, surf->view.base_level,
                                   surf->view.base_array_layer,
                                   surf->view.array_len,
                                   aux_usage);

      iris_cache_flush_for_render(batch, res->bo, surf->view.format,
                                  aux_usage);
   }
}
/**
 * \brief Call this after drawing to mark which buffers need resolving
 *
 * If the depth buffer was written to and if it has an accompanying HiZ
 * buffer, then mark that it needs a depth resolve.
 *
 * If the color buffer is a multisample window system buffer, then
 * mark that it needs a downsample.
 *
 * Also mark any render targets which will be textured as needing a render
 * cache flush.
 */
void
iris_postdraw_update_resolve_tracking(struct iris_context *ice,
                                      struct iris_batch *batch)
{
   struct pipe_framebuffer_state *cso_fb = &ice->state.framebuffer;
   struct pipe_surface *zs_surf = cso_fb->zsbuf;

   // XXX: front buffer drawing?

   if (zs_surf) {
      struct iris_resource *z_res, *s_res;
      iris_get_depth_stencil_resources(zs_surf->texture, &z_res, &s_res);
      unsigned num_layers =
         zs_surf->u.tex.last_layer - zs_surf->u.tex.first_layer + 1;

      if (z_res) {
         iris_resource_finish_depth(ice, z_res, zs_surf->u.tex.level,
                                    zs_surf->u.tex.first_layer, num_layers,
                                    ice->state.depth_writes_enabled);

         if (ice->state.depth_writes_enabled)
            iris_depth_cache_add_bo(batch, z_res->bo);
      }

      if (s_res) {
         iris_resource_finish_write(ice, s_res, zs_surf->u.tex.level,
                                    zs_surf->u.tex.first_layer, num_layers,
                                    ISL_AUX_USAGE_NONE);

         if (ice->state.stencil_writes_enabled)
            iris_depth_cache_add_bo(batch, s_res->bo);
      }
   }

   for (unsigned i = 0; i < cso_fb->nr_cbufs; i++) {
      struct iris_surface *surf = (void *) cso_fb->cbufs[i];
      if (!surf)
         continue;

      struct iris_resource *res = (void *) surf->base.texture;
      union pipe_surface_desc *desc = &surf->base.u;
      unsigned num_layers = desc->tex.last_layer - desc->tex.first_layer + 1;
      enum isl_aux_usage aux_usage = ice->state.draw_aux_usage[i];

      iris_render_cache_add_bo(batch, res->bo, surf->view.format, aux_usage);

      iris_resource_finish_render(ice, res, desc->tex.level,
                                  desc->tex.first_layer, num_layers,
                                  aux_usage);
   }
}
/**
 * Clear the cache-tracking sets.
 */
void
iris_cache_sets_clear(struct iris_batch *batch)
{
   hash_table_foreach(batch->cache.render, render_entry)
      _mesa_hash_table_remove(batch->cache.render, render_entry);

   set_foreach(batch->cache.depth, depth_entry)
      _mesa_set_remove(batch->cache.depth, depth_entry);
}
/**
 * Emits an appropriate flush for a BO if it has been rendered to within the
 * same batchbuffer as a read that's about to be emitted.
 *
 * The GPU has separate, incoherent caches for the render cache and the
 * sampler cache, along with other caches.  Usually data in the different
 * caches don't interact (e.g. we don't render to our driver-generated
 * immediate constant data), but for render-to-texture in FBOs we definitely
 * do.  When a batchbuffer is flushed, the kernel will ensure that everything
 * necessary is flushed before another use of that BO, but for reuse from
 * different caches within a batchbuffer, it's all our responsibility.
 */
void
iris_flush_depth_and_render_caches(struct iris_batch *batch)
{
   iris_emit_pipe_control_flush(batch,
                                PIPE_CONTROL_DEPTH_CACHE_FLUSH |
                                PIPE_CONTROL_RENDER_TARGET_FLUSH |
                                PIPE_CONTROL_CS_STALL);

   iris_emit_pipe_control_flush(batch,
                                PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
                                PIPE_CONTROL_CONST_CACHE_INVALIDATE);

   iris_cache_sets_clear(batch);
}
void
iris_cache_flush_for_read(struct iris_batch *batch,
                          struct iris_bo *bo)
{
   if (_mesa_hash_table_search_pre_hashed(batch->cache.render, bo->hash, bo) ||
       _mesa_set_search_pre_hashed(batch->cache.depth, bo->hash, bo))
      iris_flush_depth_and_render_caches(batch);
}
static void *
format_aux_tuple(enum isl_format format, enum isl_aux_usage aux_usage)
{
   return (void *)(uintptr_t)((uint32_t)format << 8 | aux_usage);
}
void
iris_cache_flush_for_render(struct iris_batch *batch,
                            struct iris_bo *bo,
                            enum isl_format format,
                            enum isl_aux_usage aux_usage)
{
   if (_mesa_set_search_pre_hashed(batch->cache.depth, bo->hash, bo))
      iris_flush_depth_and_render_caches(batch);

   /* Check to see if this bo has been used by a previous rendering operation
    * but with a different format or aux usage.  If it has, flush the render
    * cache so we ensure that it's only in there with one format or aux usage
    * at a time.
    *
    * Even though it's not obvious, this can easily happen in practice.
    * Suppose a client is blending on a surface with sRGB encode enabled on
    * gen9.  This implies that you get AUX_USAGE_CCS_D at best.  If the client
    * then disables sRGB decode and continues blending we will flip on
    * AUX_USAGE_CCS_E without doing any sort of resolve in-between (this is
    * perfectly valid since CCS_E is a subset of CCS_D).  However, this means
    * that we have fragments in-flight which are rendering with UNORM+CCS_E
    * and other fragments in-flight with SRGB+CCS_D on the same surface at the
    * same time and the pixel scoreboard and color blender are trying to sort
    * it all out.  This ends badly (i.e. GPU hangs).
    *
    * To date, we have never observed GPU hangs or even corruption to be
    * associated with switching the format, only the aux usage.  However,
    * there are comments in various docs which indicate that the render cache
    * isn't 100% resilient to format changes.  We may as well be conservative
    * and flush on format changes too.  We can always relax this later if we
    * find it to be a performance problem.
    */
   struct hash_entry *entry =
      _mesa_hash_table_search_pre_hashed(batch->cache.render, bo->hash, bo);
   if (entry && entry->data != format_aux_tuple(format, aux_usage))
      iris_flush_depth_and_render_caches(batch);
}
void
iris_render_cache_add_bo(struct iris_batch *batch,
                         struct iris_bo *bo,
                         enum isl_format format,
                         enum isl_aux_usage aux_usage)
{
#ifndef NDEBUG
   struct hash_entry *entry =
      _mesa_hash_table_search_pre_hashed(batch->cache.render, bo->hash, bo);
   if (entry) {
      /* Otherwise, someone didn't do a flush_for_render and that would be
       * very bad indeed.
       */
      assert(entry->data == format_aux_tuple(format, aux_usage));
   }
#endif

   _mesa_hash_table_insert_pre_hashed(batch->cache.render, bo->hash, bo,
                                      format_aux_tuple(format, aux_usage));
}
void
iris_cache_flush_for_depth(struct iris_batch *batch,
                           struct iris_bo *bo)
{
   if (_mesa_hash_table_search_pre_hashed(batch->cache.render, bo->hash, bo))
      iris_flush_depth_and_render_caches(batch);
}

void
iris_depth_cache_add_bo(struct iris_batch *batch, struct iris_bo *bo)
{
   _mesa_set_add_pre_hashed(batch->cache.depth, bo->hash, bo);
}
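/* Illustrative usage pattern (a sketch, not from the original file): writes
 * through the render pipe pair a flush with a cache-set insertion, and later
 * reads consult the sets before sampling:
 *
 *    iris_cache_flush_for_render(batch, res->bo, format, aux_usage);
 *    ...emit rendering to res...
 *    iris_render_cache_add_bo(batch, res->bo, format, aux_usage);
 *
 *    iris_cache_flush_for_read(batch, res->bo);   // before sampling from res
 */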
static void
iris_resolve_color(struct iris_context *ice,
                   struct iris_batch *batch,
                   struct iris_resource *res,
                   unsigned level, unsigned layer,
                   enum isl_aux_op resolve_op)
{
   //DBG("%s to mt %p level %u layer %u\n", __FUNCTION__, mt, level, layer);

   struct blorp_surf surf;
   iris_blorp_surf_for_resource(&ice->vtbl, &surf, &res->base, res->aux.usage,
                                level, true);

   iris_batch_maybe_flush(batch, 1500);

   /* Ivybridge PRM Vol 2, Part 1, "11.7 MCS Buffer for Render Target(s)":
    *
    *    "Any transition from any value in {Clear, Render, Resolve} to a
    *     different value in {Clear, Render, Resolve} requires end of pipe
    *     synchronization."
    *
    * In other words, fast clear ops are not properly synchronized with
    * other drawing.  We need to use a PIPE_CONTROL to ensure that the
    * contents of the previous draw hit the render target before we resolve
    * and again afterwards to ensure that the resolve is complete before we
    * do any more regular drawing.
    */
   iris_emit_end_of_pipe_sync(batch, PIPE_CONTROL_RENDER_TARGET_FLUSH);

   struct blorp_batch blorp_batch;
   blorp_batch_init(&ice->blorp, &blorp_batch, batch, 0);
   blorp_ccs_resolve(&blorp_batch, &surf, level, layer, 1,
                     isl_format_srgb_to_linear(res->surf.format),
                     resolve_op);
   blorp_batch_finish(&blorp_batch);

   /* See comment above */
   iris_emit_end_of_pipe_sync(batch, PIPE_CONTROL_RENDER_TARGET_FLUSH);
}
static void
iris_mcs_partial_resolve(struct iris_context *ice,
                         struct iris_batch *batch,
                         struct iris_resource *res,
                         uint32_t start_layer,
                         uint32_t num_layers)
{
   //DBG("%s to mt %p layers %u-%u\n", __FUNCTION__, mt,
       //start_layer, start_layer + num_layers - 1);

   assert(res->aux.usage == ISL_AUX_USAGE_MCS);

   struct blorp_surf surf;
   iris_blorp_surf_for_resource(&ice->vtbl, &surf, &res->base, res->aux.usage,
                                0, true);

   struct blorp_batch blorp_batch;
   blorp_batch_init(&ice->blorp, &blorp_batch, batch, 0);
   blorp_mcs_partial_resolve(&blorp_batch, &surf, res->surf.format,
                             start_layer, num_layers);
   blorp_batch_finish(&blorp_batch);
}
/**
 * Return true if the format that will be used to access the resource is
 * CCS_E-compatible with the resource's linear/non-sRGB format.
 *
 * Why use the linear format?  Well, although the resource may be specified
 * with an sRGB format, the usage of that color space/format can be toggled.
 * Since our HW tends to support more linear formats than sRGB ones, we use
 * this format variant to check for CCS_E compatibility.
 */
static bool
format_ccs_e_compat_with_resource(const struct gen_device_info *devinfo,
                                  const struct iris_resource *res,
                                  enum isl_format access_format)
{
   assert(res->aux.usage == ISL_AUX_USAGE_CCS_E);

   enum isl_format isl_format = isl_format_srgb_to_linear(res->surf.format);
   return isl_formats_are_ccs_e_compatible(devinfo, isl_format, access_format);
}
static bool
sample_with_hiz(const struct gen_device_info *devinfo,
                const struct iris_resource *res)
{
   if (!devinfo->has_sample_with_hiz)
      return false;

   if (res->aux.usage != ISL_AUX_USAGE_HIZ)
      return false;

   /* It seems the hardware won't fall back to the depth buffer if some of the
    * mipmap levels aren't available in the HiZ buffer.  So we need all levels
    * of the texture to be HiZ enabled.
    */
   for (unsigned level = 0; level < res->surf.levels; ++level) {
      if (!iris_resource_level_has_hiz(res, level))
         return false;
   }

   /* If compressed multisampling is enabled, then we use it for the auxiliary
    * buffer instead.
    *
    * From the BDW PRM (Volume 2d: Command Reference: Structures
    *                   RENDER_SURFACE_STATE.AuxiliarySurfaceMode):
    *
    *  "If this field is set to AUX_HIZ, Number of Multisamples must be
    *   MULTISAMPLECOUNT_1, and Surface Type cannot be SURFTYPE_3D."
    *
    * There is no such blurb for 1D textures, but there is sufficient evidence
    * that this is broken on SKL+.
    */
   // XXX: i965 disables this for arrays too, is that reasonable?
   return res->surf.samples == 1 && res->surf.dim == ISL_SURF_DIM_2D;
}
/**
 * Perform a HiZ or depth resolve operation.
 *
 * For an overview of HiZ ops, see the following sections of the Sandy Bridge
 * PRM, Volume 1, Part 2:
 *   - 7.5.3.1 Depth Buffer Clear
 *   - 7.5.3.2 Depth Buffer Resolve
 *   - 7.5.3.3 Hierarchical Depth Buffer Resolve
 */
static void
iris_hiz_exec(struct iris_context *ice,
              struct iris_batch *batch,
              struct iris_resource *res,
              unsigned int level, unsigned int start_layer,
              unsigned int num_layers, enum isl_aux_op op)
{
   assert(iris_resource_level_has_hiz(res, level));
   assert(op != ISL_AUX_OP_NONE);
   const char *name = NULL;

   switch (op) {
   case ISL_AUX_OP_FULL_RESOLVE:
      name = "depth resolve";
      break;
   case ISL_AUX_OP_AMBIGUATE:
      name = "hiz ambiguate";
      break;
   case ISL_AUX_OP_FAST_CLEAR:
      name = "depth clear";
      break;
   case ISL_AUX_OP_PARTIAL_RESOLVE:
   case ISL_AUX_OP_NONE:
      unreachable("Invalid HiZ op");
   }

   //DBG("%s %s to mt %p level %d layers %d-%d\n",
       //__func__, name, mt, level, start_layer, start_layer + num_layers - 1);

   /* The following stalls and flushes are only documented to be required
    * for HiZ clear operations.  However, they also seem to be required for
    * resolve operations.
    *
    * From the Ivybridge PRM, volume 2, "Depth Buffer Clear":
    *
    *   "If other rendering operations have preceded this clear, a
    *    PIPE_CONTROL with depth cache flush enabled, Depth Stall bit
    *    enabled must be issued before the rectangle primitive used for
    *    the depth buffer clear operation."
    *
    * Same applies for Gen8 and Gen9.
    *
    * In addition, from the Ivybridge PRM, volume 2, 1.10.4.1
    * PIPE_CONTROL, Depth Cache Flush Enable:
    *
    *   "This bit must not be set when Depth Stall Enable bit is set in
    *    this packet."
    *
    * This is confirmed to hold for real; Haswell gets immediate gpu hangs.
    *
    * Therefore issue two pipe control flushes, one for cache flush and
    * another for depth stall.
    */
   iris_emit_pipe_control_flush(batch,
                                PIPE_CONTROL_DEPTH_CACHE_FLUSH |
                                PIPE_CONTROL_CS_STALL);

   iris_emit_pipe_control_flush(batch, PIPE_CONTROL_DEPTH_STALL);

   assert(res->aux.usage == ISL_AUX_USAGE_HIZ && res->aux.bo);

   iris_batch_maybe_flush(batch, 1500);

   struct blorp_surf surf;
   iris_blorp_surf_for_resource(&ice->vtbl, &surf, &res->base,
                                ISL_AUX_USAGE_HIZ, level, true);

   struct blorp_batch blorp_batch;
   blorp_batch_init(&ice->blorp, &blorp_batch, batch,
                    BLORP_BATCH_NO_UPDATE_CLEAR_COLOR);
   blorp_hiz_op(&blorp_batch, &surf, level, start_layer, num_layers, op);
   blorp_batch_finish(&blorp_batch);

   /* The following stalls and flushes are only documented to be required
    * for HiZ clear operations.  However, they also seem to be required for
    * resolve operations.
    *
    * From the Broadwell PRM, volume 7, "Depth Buffer Clear":
    *
    *   "Depth buffer clear pass using any of the methods (WM_STATE,
    *    3DSTATE_WM or 3DSTATE_WM_HZ_OP) must be followed by a
    *    PIPE_CONTROL command with DEPTH_STALL bit and Depth FLUSH bits
    *    "set" before starting to render.  DepthStall and DepthFlush are
    *    not needed between consecutive depth clear passes nor is it
    *    required if the depth clear pass was done with
    *    'full_surf_clear' bit set in the 3DSTATE_WM_HZ_OP."
    *
    * TODO: As the spec says, this could be conditional.
    */
   iris_emit_pipe_control_flush(batch,
                                PIPE_CONTROL_DEPTH_CACHE_FLUSH |
                                PIPE_CONTROL_DEPTH_STALL);
}
/**
 * Does the resource's slice have hiz enabled?
 */
bool
iris_resource_level_has_hiz(const struct iris_resource *res, uint32_t level)
{
   iris_resource_check_level_layer(res, level, 0);
   return res->aux.has_hiz & 1 << level;
}
/** \brief Assert that the level and layer are valid for the resource. */
void
iris_resource_check_level_layer(UNUSED const struct iris_resource *res,
                                UNUSED uint32_t level, UNUSED uint32_t layer)
{
   assert(level < res->surf.levels);
   assert(layer < util_num_layers(&res->base, level));
}
static inline uint32_t
miptree_level_range_length(const struct iris_resource *res,
                           uint32_t start_level, uint32_t num_levels)
{
   assert(start_level < res->surf.levels);

   if (num_levels == INTEL_REMAINING_LAYERS)
      num_levels = res->surf.levels;

   /* Check for overflow */
   assert(start_level + num_levels >= start_level);
   assert(start_level + num_levels <= res->surf.levels);

   return num_levels;
}
static inline uint32_t
miptree_layer_range_length(const struct iris_resource *res, uint32_t level,
                           uint32_t start_layer, uint32_t num_layers)
{
   assert(level <= res->base.last_level);

   const uint32_t total_num_layers = iris_get_num_logical_layers(res, level);
   assert(start_layer < total_num_layers);
   if (num_layers == INTEL_REMAINING_LAYERS)
      num_layers = total_num_layers - start_layer;
   /* Check for overflow */
   assert(start_layer + num_layers >= start_layer);
   assert(start_layer + num_layers <= total_num_layers);

   return num_layers;
}
static bool
has_color_unresolved(const struct iris_resource *res,
                     unsigned start_level, unsigned num_levels,
                     unsigned start_layer, unsigned num_layers)
{
   if (!res->aux.bo)
      return false;

   /* Clamp the level range to fit the resource */
   num_levels = miptree_level_range_length(res, start_level, num_levels);

   for (uint32_t l = 0; l < num_levels; l++) {
      const uint32_t level = start_level + l;
      const uint32_t level_layers =
         miptree_layer_range_length(res, level, start_layer, num_layers);
      for (unsigned a = 0; a < level_layers; a++) {
         enum isl_aux_state aux_state =
            iris_resource_get_aux_state(res, level, start_layer + a);
         assert(aux_state != ISL_AUX_STATE_AUX_INVALID);
         if (aux_state != ISL_AUX_STATE_PASS_THROUGH)
            return true;
      }
   }

   return false;
}
static enum isl_aux_op
get_ccs_d_resolve_op(enum isl_aux_state aux_state,
                     enum isl_aux_usage aux_usage,
                     bool fast_clear_supported)
{
   assert(aux_usage == ISL_AUX_USAGE_NONE || aux_usage == ISL_AUX_USAGE_CCS_D);

   const bool ccs_supported = aux_usage == ISL_AUX_USAGE_CCS_D;

   assert(ccs_supported == fast_clear_supported);

   switch (aux_state) {
   case ISL_AUX_STATE_CLEAR:
   case ISL_AUX_STATE_PARTIAL_CLEAR:
      if (!ccs_supported)
         return ISL_AUX_OP_FULL_RESOLVE;
      else
         return ISL_AUX_OP_NONE;

   case ISL_AUX_STATE_PASS_THROUGH:
      return ISL_AUX_OP_NONE;

   case ISL_AUX_STATE_RESOLVED:
   case ISL_AUX_STATE_AUX_INVALID:
   case ISL_AUX_STATE_COMPRESSED_CLEAR:
   case ISL_AUX_STATE_COMPRESSED_NO_CLEAR:
      break;
   }

   unreachable("Invalid aux state for CCS_D");
}
static enum isl_aux_op
get_ccs_e_resolve_op(enum isl_aux_state aux_state,
                     enum isl_aux_usage aux_usage,
                     bool fast_clear_supported)
{
   /* CCS_E surfaces can be accessed as CCS_D if we're careful. */
   assert(aux_usage == ISL_AUX_USAGE_NONE ||
          aux_usage == ISL_AUX_USAGE_CCS_D ||
          aux_usage == ISL_AUX_USAGE_CCS_E);

   if (aux_usage == ISL_AUX_USAGE_CCS_D)
      assert(fast_clear_supported);

   switch (aux_state) {
   case ISL_AUX_STATE_CLEAR:
   case ISL_AUX_STATE_PARTIAL_CLEAR:
      if (fast_clear_supported)
         return ISL_AUX_OP_NONE;
      else if (aux_usage == ISL_AUX_USAGE_CCS_E)
         return ISL_AUX_OP_PARTIAL_RESOLVE;
      else
         return ISL_AUX_OP_FULL_RESOLVE;

   case ISL_AUX_STATE_COMPRESSED_CLEAR:
      if (aux_usage != ISL_AUX_USAGE_CCS_E)
         return ISL_AUX_OP_FULL_RESOLVE;
      else if (!fast_clear_supported)
         return ISL_AUX_OP_PARTIAL_RESOLVE;
      else
         return ISL_AUX_OP_NONE;

   case ISL_AUX_STATE_COMPRESSED_NO_CLEAR:
      if (aux_usage != ISL_AUX_USAGE_CCS_E)
         return ISL_AUX_OP_FULL_RESOLVE;
      else
         return ISL_AUX_OP_NONE;

   case ISL_AUX_STATE_PASS_THROUGH:
      return ISL_AUX_OP_NONE;

   case ISL_AUX_STATE_RESOLVED:
   case ISL_AUX_STATE_AUX_INVALID:
      break;
   }

   unreachable("Invalid aux state for CCS_E");
}
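/* Worked example (added for clarity): sampling a CCS_E surface whose current
 * state is ISL_AUX_STATE_COMPRESSED_CLEAR through a CCS-incompatible view
 * (aux_usage == ISL_AUX_USAGE_NONE, fast_clear_supported == false) hits the
 * first branch of that case and yields ISL_AUX_OP_FULL_RESOLVE, while the
 * same state accessed as CCS_E without fast-clear support only needs
 * ISL_AUX_OP_PARTIAL_RESOLVE.
 */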
static void
iris_resource_prepare_ccs_access(struct iris_context *ice,
                                 struct iris_batch *batch,
                                 struct iris_resource *res,
                                 uint32_t level, uint32_t layer,
                                 enum isl_aux_usage aux_usage,
                                 bool fast_clear_supported)
{
   enum isl_aux_state aux_state = iris_resource_get_aux_state(res, level, layer);

   enum isl_aux_op resolve_op;
   if (res->aux.usage == ISL_AUX_USAGE_CCS_E) {
      resolve_op = get_ccs_e_resolve_op(aux_state, aux_usage,
                                        fast_clear_supported);
   } else {
      assert(res->aux.usage == ISL_AUX_USAGE_CCS_D);
      resolve_op = get_ccs_d_resolve_op(aux_state, aux_usage,
                                        fast_clear_supported);
   }

   if (resolve_op != ISL_AUX_OP_NONE) {
      iris_resolve_color(ice, batch, res, level, layer, resolve_op);

      switch (resolve_op) {
      case ISL_AUX_OP_FULL_RESOLVE:
         /* The CCS full resolve operation destroys the CCS and sets it to the
          * pass-through state.  (You can also think of this as being both a
          * resolve and an ambiguate in one operation.)
          */
         iris_resource_set_aux_state(ice, res, level, layer, 1,
                                     ISL_AUX_STATE_PASS_THROUGH);
         break;

      case ISL_AUX_OP_PARTIAL_RESOLVE:
         iris_resource_set_aux_state(ice, res, level, layer, 1,
                                     ISL_AUX_STATE_COMPRESSED_NO_CLEAR);
         break;

      default:
         unreachable("Invalid resolve op");
      }
   }
}
static void
iris_resource_finish_ccs_write(struct iris_context *ice,
                               struct iris_resource *res,
                               uint32_t level, uint32_t layer,
                               enum isl_aux_usage aux_usage)
{
   assert(aux_usage == ISL_AUX_USAGE_NONE ||
          aux_usage == ISL_AUX_USAGE_CCS_D ||
          aux_usage == ISL_AUX_USAGE_CCS_E);

   enum isl_aux_state aux_state =
      iris_resource_get_aux_state(res, level, layer);

   if (res->aux.usage == ISL_AUX_USAGE_CCS_E) {
      switch (aux_state) {
      case ISL_AUX_STATE_CLEAR:
      case ISL_AUX_STATE_PARTIAL_CLEAR:
         assert(aux_usage == ISL_AUX_USAGE_CCS_E ||
                aux_usage == ISL_AUX_USAGE_CCS_D);

         if (aux_usage == ISL_AUX_USAGE_CCS_E) {
            iris_resource_set_aux_state(ice, res, level, layer, 1,
                                        ISL_AUX_STATE_COMPRESSED_CLEAR);
         } else if (aux_state != ISL_AUX_STATE_PARTIAL_CLEAR) {
            iris_resource_set_aux_state(ice, res, level, layer, 1,
                                        ISL_AUX_STATE_PARTIAL_CLEAR);
         }
         break;

      case ISL_AUX_STATE_COMPRESSED_CLEAR:
      case ISL_AUX_STATE_COMPRESSED_NO_CLEAR:
         assert(aux_usage == ISL_AUX_USAGE_CCS_E);
         break; /* Nothing to do */

      case ISL_AUX_STATE_PASS_THROUGH:
         if (aux_usage == ISL_AUX_USAGE_CCS_E) {
            iris_resource_set_aux_state(ice, res, level, layer, 1,
                                        ISL_AUX_STATE_COMPRESSED_NO_CLEAR);
         }
         break;

      case ISL_AUX_STATE_RESOLVED:
      case ISL_AUX_STATE_AUX_INVALID:
         unreachable("Invalid aux state for CCS_E");
      }
   } else {
      assert(res->aux.usage == ISL_AUX_USAGE_CCS_D);
      /* CCS_D is a bit simpler */
      switch (aux_state) {
      case ISL_AUX_STATE_CLEAR:
         assert(aux_usage == ISL_AUX_USAGE_CCS_D);
         iris_resource_set_aux_state(ice, res, level, layer, 1,
                                     ISL_AUX_STATE_PARTIAL_CLEAR);
         break;

      case ISL_AUX_STATE_PARTIAL_CLEAR:
         assert(aux_usage == ISL_AUX_USAGE_CCS_D);
         break; /* Nothing to do */

      case ISL_AUX_STATE_PASS_THROUGH:
         /* Nothing to do */
         break;

      case ISL_AUX_STATE_COMPRESSED_CLEAR:
      case ISL_AUX_STATE_COMPRESSED_NO_CLEAR:
      case ISL_AUX_STATE_RESOLVED:
      case ISL_AUX_STATE_AUX_INVALID:
         unreachable("Invalid aux state for CCS_D");
      }
   }
}
static void
iris_resource_prepare_mcs_access(struct iris_context *ice,
                                 struct iris_batch *batch,
                                 struct iris_resource *res,
                                 uint32_t layer,
                                 enum isl_aux_usage aux_usage,
                                 bool fast_clear_supported)
{
   assert(aux_usage == ISL_AUX_USAGE_MCS);

   switch (iris_resource_get_aux_state(res, 0, layer)) {
   case ISL_AUX_STATE_CLEAR:
   case ISL_AUX_STATE_COMPRESSED_CLEAR:
      if (!fast_clear_supported) {
         iris_mcs_partial_resolve(ice, batch, res, layer, 1);
         iris_resource_set_aux_state(ice, res, 0, layer, 1,
                                     ISL_AUX_STATE_COMPRESSED_NO_CLEAR);
      }
      break;

   case ISL_AUX_STATE_COMPRESSED_NO_CLEAR:
      break; /* Nothing to do */

   case ISL_AUX_STATE_RESOLVED:
   case ISL_AUX_STATE_PASS_THROUGH:
   case ISL_AUX_STATE_AUX_INVALID:
   case ISL_AUX_STATE_PARTIAL_CLEAR:
      unreachable("Invalid aux state for MCS");
   }
}
static void
iris_resource_finish_mcs_write(struct iris_context *ice,
                               struct iris_resource *res,
                               uint32_t layer,
                               enum isl_aux_usage aux_usage)
{
   assert(aux_usage == ISL_AUX_USAGE_MCS);

   switch (iris_resource_get_aux_state(res, 0, layer)) {
   case ISL_AUX_STATE_CLEAR:
      iris_resource_set_aux_state(ice, res, 0, layer, 1,
                                  ISL_AUX_STATE_COMPRESSED_CLEAR);
      break;

   case ISL_AUX_STATE_COMPRESSED_CLEAR:
   case ISL_AUX_STATE_COMPRESSED_NO_CLEAR:
      break; /* Nothing to do */

   case ISL_AUX_STATE_RESOLVED:
   case ISL_AUX_STATE_PASS_THROUGH:
   case ISL_AUX_STATE_AUX_INVALID:
   case ISL_AUX_STATE_PARTIAL_CLEAR:
      unreachable("Invalid aux state for MCS");
   }
}
static void
iris_resource_prepare_hiz_access(struct iris_context *ice,
                                 struct iris_batch *batch,
                                 struct iris_resource *res,
                                 uint32_t level, uint32_t layer,
                                 enum isl_aux_usage aux_usage,
                                 bool fast_clear_supported)
{
   assert(aux_usage == ISL_AUX_USAGE_NONE || aux_usage == ISL_AUX_USAGE_HIZ);

   enum isl_aux_op hiz_op = ISL_AUX_OP_NONE;
   switch (iris_resource_get_aux_state(res, level, layer)) {
   case ISL_AUX_STATE_CLEAR:
   case ISL_AUX_STATE_COMPRESSED_CLEAR:
      if (aux_usage != ISL_AUX_USAGE_HIZ || !fast_clear_supported)
         hiz_op = ISL_AUX_OP_FULL_RESOLVE;
      break;

   case ISL_AUX_STATE_COMPRESSED_NO_CLEAR:
      if (aux_usage != ISL_AUX_USAGE_HIZ)
         hiz_op = ISL_AUX_OP_FULL_RESOLVE;
      break;

   case ISL_AUX_STATE_PASS_THROUGH:
   case ISL_AUX_STATE_RESOLVED:
      break;

   case ISL_AUX_STATE_AUX_INVALID:
      if (aux_usage == ISL_AUX_USAGE_HIZ)
         hiz_op = ISL_AUX_OP_AMBIGUATE;
      break;

   case ISL_AUX_STATE_PARTIAL_CLEAR:
      unreachable("Invalid HiZ state");
   }

   if (hiz_op != ISL_AUX_OP_NONE) {
      iris_hiz_exec(ice, batch, res, level, layer, 1, hiz_op);

      switch (hiz_op) {
      case ISL_AUX_OP_FULL_RESOLVE:
         iris_resource_set_aux_state(ice, res, level, layer, 1,
                                     ISL_AUX_STATE_RESOLVED);
         break;

      case ISL_AUX_OP_AMBIGUATE:
         /* The HiZ resolve operation is actually an ambiguate */
         iris_resource_set_aux_state(ice, res, level, layer, 1,
                                     ISL_AUX_STATE_PASS_THROUGH);
         break;

      default:
         unreachable("Invalid HiZ op");
      }
   }
}
static void
iris_resource_finish_hiz_write(struct iris_context *ice,
                               struct iris_resource *res,
                               uint32_t level, uint32_t layer,
                               enum isl_aux_usage aux_usage)
{
   assert(aux_usage == ISL_AUX_USAGE_NONE || aux_usage == ISL_AUX_USAGE_HIZ);

   switch (iris_resource_get_aux_state(res, level, layer)) {
   case ISL_AUX_STATE_CLEAR:
      assert(aux_usage == ISL_AUX_USAGE_HIZ);
      iris_resource_set_aux_state(ice, res, level, layer, 1,
                                  ISL_AUX_STATE_COMPRESSED_CLEAR);
      break;

   case ISL_AUX_STATE_COMPRESSED_NO_CLEAR:
   case ISL_AUX_STATE_COMPRESSED_CLEAR:
      assert(aux_usage == ISL_AUX_USAGE_HIZ);
      break; /* Nothing to do */

   case ISL_AUX_STATE_RESOLVED:
      if (aux_usage == ISL_AUX_USAGE_HIZ) {
         iris_resource_set_aux_state(ice, res, level, layer, 1,
                                     ISL_AUX_STATE_COMPRESSED_NO_CLEAR);
      } else {
         iris_resource_set_aux_state(ice, res, level, layer, 1,
                                     ISL_AUX_STATE_AUX_INVALID);
      }
      break;

   case ISL_AUX_STATE_PASS_THROUGH:
      if (aux_usage == ISL_AUX_USAGE_HIZ) {
         iris_resource_set_aux_state(ice, res, level, layer, 1,
                                     ISL_AUX_STATE_COMPRESSED_NO_CLEAR);
      }
      break;

   case ISL_AUX_STATE_AUX_INVALID:
      assert(aux_usage != ISL_AUX_USAGE_HIZ);
      break;

   case ISL_AUX_STATE_PARTIAL_CLEAR:
      unreachable("Invalid HiZ state");
   }
}
void
iris_resource_prepare_access(struct iris_context *ice,
                             struct iris_batch *batch,
                             struct iris_resource *res,
                             uint32_t start_level, uint32_t num_levels,
                             uint32_t start_layer, uint32_t num_layers,
                             enum isl_aux_usage aux_usage,
                             bool fast_clear_supported)
{
   num_levels = miptree_level_range_length(res, start_level, num_levels);

   switch (res->aux.usage) {
   case ISL_AUX_USAGE_NONE:
      /* Nothing to do */
      break;

   case ISL_AUX_USAGE_MCS:
      assert(start_level == 0 && num_levels == 1);
      const uint32_t level_layers =
         miptree_layer_range_length(res, 0, start_layer, num_layers);
      for (uint32_t a = 0; a < level_layers; a++) {
         iris_resource_prepare_mcs_access(ice, batch, res, start_layer + a,
                                          aux_usage, fast_clear_supported);
      }
      break;

   case ISL_AUX_USAGE_CCS_D:
   case ISL_AUX_USAGE_CCS_E:
      for (uint32_t l = 0; l < num_levels; l++) {
         const uint32_t level = start_level + l;
         const uint32_t level_layers =
            miptree_layer_range_length(res, level, start_layer, num_layers);
         for (uint32_t a = 0; a < level_layers; a++) {
            iris_resource_prepare_ccs_access(ice, batch, res, level,
                                             start_layer + a,
                                             aux_usage, fast_clear_supported);
         }
      }
      break;

   case ISL_AUX_USAGE_HIZ:
      for (uint32_t l = 0; l < num_levels; l++) {
         const uint32_t level = start_level + l;
         if (!iris_resource_level_has_hiz(res, level))
            continue;

         const uint32_t level_layers =
            miptree_layer_range_length(res, level, start_layer, num_layers);
         for (uint32_t a = 0; a < level_layers; a++) {
            iris_resource_prepare_hiz_access(ice, batch, res, level,
                                             start_layer + a, aux_usage,
                                             fast_clear_supported);
         }
      }
      break;

   default:
      unreachable("Invalid aux usage");
   }
}
void
iris_resource_finish_write(struct iris_context *ice,
                           struct iris_resource *res, uint32_t level,
                           uint32_t start_layer, uint32_t num_layers,
                           enum isl_aux_usage aux_usage)
{
   num_layers = miptree_layer_range_length(res, level, start_layer, num_layers);

   switch (res->aux.usage) {
   case ISL_AUX_USAGE_NONE:
      break;

   case ISL_AUX_USAGE_MCS:
      for (uint32_t a = 0; a < num_layers; a++) {
         iris_resource_finish_mcs_write(ice, res, start_layer + a,
                                        aux_usage);
      }
      break;

   case ISL_AUX_USAGE_CCS_D:
   case ISL_AUX_USAGE_CCS_E:
      for (uint32_t a = 0; a < num_layers; a++) {
         iris_resource_finish_ccs_write(ice, res, level, start_layer + a,
                                        aux_usage);
      }
      break;

   case ISL_AUX_USAGE_HIZ:
      if (!iris_resource_level_has_hiz(res, level))
         return;

      for (uint32_t a = 0; a < num_layers; a++) {
         iris_resource_finish_hiz_write(ice, res, level, start_layer + a,
                                        aux_usage);
      }
      break;

   default:
      unreachable("Invalid aux usage");
   }
}
enum isl_aux_state
iris_resource_get_aux_state(const struct iris_resource *res,
                            uint32_t level, uint32_t layer)
{
   iris_resource_check_level_layer(res, level, layer);

   if (res->surf.usage & ISL_SURF_USAGE_DEPTH_BIT) {
      assert(iris_resource_level_has_hiz(res, level));
   } else if (res->surf.usage & ISL_SURF_USAGE_STENCIL_BIT) {
      unreachable("Cannot get aux state for stencil");
   } else {
      assert(res->surf.samples == 1 ||
             res->surf.msaa_layout == ISL_MSAA_LAYOUT_ARRAY);
   }

   return res->aux.state[level][layer];
}
void
iris_resource_set_aux_state(struct iris_context *ice,
                            struct iris_resource *res, uint32_t level,
                            uint32_t start_layer, uint32_t num_layers,
                            enum isl_aux_state aux_state)
{
   num_layers = miptree_layer_range_length(res, level, start_layer, num_layers);

   if (res->surf.usage & ISL_SURF_USAGE_DEPTH_BIT) {
      assert(iris_resource_level_has_hiz(res, level));
   } else if (res->surf.usage & ISL_SURF_USAGE_STENCIL_BIT) {
      unreachable("Cannot set aux state for stencil");
   } else {
      assert(res->surf.samples == 1 ||
             res->surf.msaa_layout == ISL_MSAA_LAYOUT_ARRAY);
   }

   for (unsigned a = 0; a < num_layers; a++) {
      if (res->aux.state[level][start_layer + a] != aux_state) {
         res->aux.state[level][start_layer + a] = aux_state;
         /* XXX: Need to track which bindings to make dirty */
         ice->state.dirty |= IRIS_ALL_DIRTY_BINDINGS;
      }
   }
}
/* On Gen9 color buffers may be compressed by the hardware (lossless
 * compression).  There are, however, format restrictions and care needs to be
 * taken that the sampler engine is capable of re-interpreting a buffer with
 * a format different from the one it was originally written with.
 *
 * For example, SRGB formats are not compressible and the sampler engine isn't
 * capable of treating RGBA_UNORM as SRGB_ALPHA.  In such a case the underlying
 * color buffer needs to be resolved so that the sampling surface can be
 * sampled as non-compressed (i.e., without the auxiliary MCS buffer being
 * set).
 */
static bool
can_texture_with_ccs(const struct gen_device_info *devinfo,
                     struct pipe_debug_callback *dbg,
                     const struct iris_resource *res,
                     enum isl_format view_format)
{
   if (res->aux.usage != ISL_AUX_USAGE_CCS_E)
      return false;

   if (!format_ccs_e_compat_with_resource(devinfo, res, view_format)) {
      const struct isl_format_layout *res_fmtl =
         isl_format_get_layout(res->surf.format);
      const struct isl_format_layout *view_fmtl =
         isl_format_get_layout(view_format);

      perf_debug(dbg, "Incompatible sampling format (%s) for CCS (%s)\n",
                 view_fmtl->name, res_fmtl->name);

      return false;
   }

   return true;
}
enum isl_aux_usage
iris_resource_texture_aux_usage(struct iris_context *ice,
                                const struct iris_resource *res,
                                enum isl_format view_format,
                                enum gen9_astc5x5_wa_tex_type astc5x5_wa_bits)
{
   struct iris_screen *screen = (void *) ice->ctx.screen;
   struct gen_device_info *devinfo = &screen->devinfo;

   assert(devinfo->gen == 9 || astc5x5_wa_bits == 0);

   /* On gen9, ASTC 5x5 textures cannot live in the sampler cache along side
    * CCS or HiZ compressed textures.  See gen9_apply_astc5x5_wa_flush() for
    * details.
    */
   if ((astc5x5_wa_bits & GEN9_ASTC5X5_WA_TEX_TYPE_ASTC5x5) &&
       res->aux.usage != ISL_AUX_USAGE_MCS)
      return ISL_AUX_USAGE_NONE;

   switch (res->aux.usage) {
   case ISL_AUX_USAGE_HIZ:
      if (sample_with_hiz(devinfo, res))
         return ISL_AUX_USAGE_HIZ;
      break;

   case ISL_AUX_USAGE_MCS:
      return ISL_AUX_USAGE_MCS;

   case ISL_AUX_USAGE_CCS_D:
   case ISL_AUX_USAGE_CCS_E:
      /* If we don't have any unresolved color, report an aux usage of
       * ISL_AUX_USAGE_NONE.  This way, texturing won't even look at the
       * aux surface and we can save some bandwidth.
       */
      if (!has_color_unresolved(res, 0, INTEL_REMAINING_LEVELS,
                                0, INTEL_REMAINING_LAYERS))
         return ISL_AUX_USAGE_NONE;

      if (can_texture_with_ccs(devinfo, &ice->dbg, res, view_format))
         return ISL_AUX_USAGE_CCS_E;
      break;

   default:
      break;
   }

   return ISL_AUX_USAGE_NONE;
}
static bool
isl_formats_are_fast_clear_compatible(enum isl_format a, enum isl_format b)
{
   /* On gen8 and earlier, the hardware was only capable of handling 0/1 clear
    * values so sRGB curve application was a no-op for all fast-clearable
    * formats.
    *
    * On gen9+, the hardware supports arbitrary clear values.  For sRGB clear
    * values, the hardware interprets the floats, not as what would be
    * returned from the sampler (or written by the shader), but as being
    * between format conversion and sRGB curve application.  This means that
    * we can switch between sRGB and UNORM without having to whack the clear
    * color.
    */
   return isl_format_srgb_to_linear(a) == isl_format_srgb_to_linear(b);
}
void
iris_resource_prepare_texture(struct iris_context *ice,
                              struct iris_batch *batch,
                              struct iris_resource *res,
                              enum isl_format view_format,
                              uint32_t start_level, uint32_t num_levels,
                              uint32_t start_layer, uint32_t num_layers,
                              enum gen9_astc5x5_wa_tex_type astc5x5_wa_bits)
{
   enum isl_aux_usage aux_usage =
      iris_resource_texture_aux_usage(ice, res, view_format, astc5x5_wa_bits);

   bool clear_supported = aux_usage != ISL_AUX_USAGE_NONE;

   /* Clear color is specified as ints or floats and the conversion is done by
    * the sampler.  If we have a texture view, we would have to perform the
    * clear color conversion manually.  Just disable clear color.
    */
   if (!isl_formats_are_fast_clear_compatible(res->surf.format, view_format))
      clear_supported = false;

   iris_resource_prepare_access(ice, batch, res, start_level, num_levels,
                                start_layer, num_layers,
                                aux_usage, clear_supported);
}
void
iris_resource_prepare_image(struct iris_context *ice,
                            struct iris_batch *batch,
                            struct iris_resource *res)
{
   /* The data port doesn't understand any compression */
   iris_resource_prepare_access(ice, batch, res, 0, INTEL_REMAINING_LEVELS,
                                0, INTEL_REMAINING_LAYERS,
                                ISL_AUX_USAGE_NONE, false);
}
enum isl_aux_usage
iris_resource_render_aux_usage(struct iris_context *ice,
                               struct iris_resource *res,
                               enum isl_format render_format,
                               bool blend_enabled,
                               bool draw_aux_disabled)
{
   struct iris_screen *screen = (void *) ice->ctx.screen;
   struct gen_device_info *devinfo = &screen->devinfo;

   if (draw_aux_disabled)
      return ISL_AUX_USAGE_NONE;

   switch (res->aux.usage) {
   case ISL_AUX_USAGE_MCS:
      return ISL_AUX_USAGE_MCS;

   case ISL_AUX_USAGE_CCS_D:
   case ISL_AUX_USAGE_CCS_E:
      /* Gen9+ hardware technically supports non-0/1 clear colors with sRGB
       * formats.  However, there are issues with blending where it doesn't
       * properly apply the sRGB curve to the clear color when blending.
       */
      if (devinfo->gen >= 9 && blend_enabled &&
          isl_format_is_srgb(render_format) &&
          !isl_color_value_is_zero_one(res->fast_clear_color, render_format))
         return ISL_AUX_USAGE_NONE;

      if (res->aux.usage == ISL_AUX_USAGE_CCS_E &&
          format_ccs_e_compat_with_resource(devinfo, res, render_format))
         return ISL_AUX_USAGE_CCS_E;

      /* Otherwise, we have to fall back to CCS_D */
      return ISL_AUX_USAGE_CCS_D;

   default:
      return ISL_AUX_USAGE_NONE;
   }
}
void
iris_resource_prepare_render(struct iris_context *ice,
                             struct iris_batch *batch,
                             struct iris_resource *res, uint32_t level,
                             uint32_t start_layer, uint32_t layer_count,
                             enum isl_aux_usage aux_usage)
{
   iris_resource_prepare_access(ice, batch, res, level, 1, start_layer,
                                layer_count, aux_usage,
                                aux_usage != ISL_AUX_USAGE_NONE);
}
void
iris_resource_finish_render(struct iris_context *ice,
                            struct iris_resource *res, uint32_t level,
                            uint32_t start_layer, uint32_t layer_count,
                            enum isl_aux_usage aux_usage)
{
   iris_resource_finish_write(ice, res, level, start_layer, layer_count,
                              aux_usage);
}
void
iris_resource_prepare_depth(struct iris_context *ice,
                            struct iris_batch *batch,
                            struct iris_resource *res, uint32_t level,
                            uint32_t start_layer, uint32_t layer_count)
{
   iris_resource_prepare_access(ice, batch, res, level, 1, start_layer,
                                layer_count, res->aux.usage, !!res->aux.bo);
}
void
iris_resource_finish_depth(struct iris_context *ice,
                           struct iris_resource *res, uint32_t level,
                           uint32_t start_layer, uint32_t layer_count,
                           bool depth_written)
{
   if (depth_written) {
      iris_resource_finish_write(ice, res, level, start_layer, layer_count,
                                 res->aux.usage);
   }
}