2 * Copyright © 2017 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included
12 * in all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
15 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
24 * @file iris_resolve.c
26 * This file handles resolve tracking for main and auxiliary surfaces.
28 * It also handles our cache tracking. We have sets for the render cache,
29 * depth cache, and so on. If a BO is in a cache's set, then it may have
30 * data in that cache. The helpers take care of emitting flushes for
31 * render-to-texture, format reinterpretation issues, and other situations.
34 #include "util/hash_table.h"
36 #include "iris_context.h"
39 * Disable auxiliary buffers if a renderbuffer is also bound as a texture
40 * or shader image. This causes a self-dependency, where both rendering
41 * and sampling may concurrently read or write the CCS buffer, causing
45 disable_rb_aux_buffer(struct iris_context
*ice
,
46 bool *draw_aux_buffer_disabled
,
47 struct iris_resource
*tex_res
,
48 unsigned min_level
, unsigned num_levels
,
51 struct pipe_framebuffer_state
*cso_fb
= &ice
->state
.framebuffer
;
54 /* We only need to worry about color compression and fast clears. */
55 if (tex_res
->aux
.usage
!= ISL_AUX_USAGE_CCS_D
&&
56 tex_res
->aux
.usage
!= ISL_AUX_USAGE_CCS_E
)
59 for (unsigned i
= 0; i
< cso_fb
->nr_cbufs
; i
++) {
60 struct iris_surface
*surf
= (void *) cso_fb
->cbufs
[i
];
64 struct iris_resource
*rb_res
= (void *) surf
->base
.texture
;
66 if (rb_res
->bo
== tex_res
->bo
&&
67 surf
->base
.u
.tex
.level
>= min_level
&&
68 surf
->base
.u
.tex
.level
< min_level
+ num_levels
) {
69 found
= draw_aux_buffer_disabled
[i
] = true;
75 "Disabling CCS because a renderbuffer is also bound %s.\n",
83 resolve_sampler_views(struct iris_context
*ice
,
84 struct iris_batch
*batch
,
85 struct iris_shader_state
*shs
,
86 bool *draw_aux_buffer_disabled
)
88 uint32_t views
= shs
->bound_sampler_views
;
90 unsigned astc5x5_wa_bits
= 0; // XXX: actual tracking
93 const int i
= u_bit_scan(&views
);
94 struct iris_sampler_view
*isv
= shs
->textures
[i
];
95 struct iris_resource
*res
= (void *) isv
->base
.texture
;
97 if (batch
->name
!= IRIS_BATCH_COMPUTE
) {
98 disable_rb_aux_buffer(ice
, draw_aux_buffer_disabled
,
99 res
, isv
->view
.base_level
, isv
->view
.levels
,
103 iris_resource_prepare_texture(ice
, batch
, res
, isv
->view
.format
,
104 isv
->view
.base_level
, isv
->view
.levels
,
105 isv
->view
.base_array_layer
,
109 iris_cache_flush_for_read(batch
, res
->bo
);
114 resolve_image_views(struct iris_context
*ice
,
115 struct iris_batch
*batch
,
116 struct iris_shader_state
*shs
,
117 bool *draw_aux_buffer_disabled
)
119 uint32_t views
= shs
->bound_image_views
;
122 const int i
= u_bit_scan(&views
);
123 struct iris_resource
*res
= (void *) shs
->image
[i
].res
;
125 if (batch
->name
!= IRIS_BATCH_COMPUTE
) {
126 disable_rb_aux_buffer(ice
, draw_aux_buffer_disabled
,
127 res
, 0, ~0, "as a shader image");
130 iris_resource_prepare_image(ice
, batch
, res
);
132 iris_cache_flush_for_read(batch
, res
->bo
);
138 * \brief Resolve buffers before drawing.
140 * Resolve the depth buffer's HiZ buffer, resolve the depth buffer of each
141 * enabled depth texture, and flush the render cache for any dirty textures.
144 iris_predraw_resolve_inputs(struct iris_context
*ice
,
145 struct iris_batch
*batch
,
146 struct iris_shader_state
*shs
,
147 bool *draw_aux_buffer_disabled
)
149 resolve_sampler_views(ice
, batch
, shs
, draw_aux_buffer_disabled
);
150 resolve_image_views(ice
, batch
, shs
, draw_aux_buffer_disabled
);
156 iris_predraw_resolve_framebuffer(struct iris_context
*ice
,
157 struct iris_batch
*batch
,
158 bool *draw_aux_buffer_disabled
)
160 struct pipe_framebuffer_state
*cso_fb
= &ice
->state
.framebuffer
;
161 struct pipe_surface
*zs_surf
= cso_fb
->zsbuf
;
164 struct iris_resource
*z_res
, *s_res
;
165 iris_get_depth_stencil_resources(zs_surf
->texture
, &z_res
, &s_res
);
166 unsigned num_layers
=
167 zs_surf
->u
.tex
.last_layer
- zs_surf
->u
.tex
.first_layer
+ 1;
170 iris_resource_prepare_depth(ice
, batch
, z_res
, zs_surf
->u
.tex
.level
,
171 zs_surf
->u
.tex
.first_layer
, num_layers
);
175 for (unsigned i
= 0; i
< cso_fb
->nr_cbufs
; i
++) {
176 struct iris_surface
*surf
= (void *) cso_fb
->cbufs
[i
];
180 struct iris_resource
*res
= (void *) surf
->base
.texture
;
182 enum isl_aux_usage aux_usage
=
183 iris_resource_render_aux_usage(ice
, res
, surf
->view
.format
,
184 ice
->state
.blend_enables
& (1u << i
),
185 draw_aux_buffer_disabled
[i
]);
187 // XXX: NEW_AUX_STATE
188 ice
->state
.draw_aux_usage
[i
] = aux_usage
;
190 iris_resource_prepare_render(ice
, batch
, res
, surf
->view
.base_level
,
191 surf
->view
.base_array_layer
,
192 surf
->view
.array_len
,
195 iris_cache_flush_for_render(batch
, res
->bo
, surf
->view
.format
,
201 * \brief Call this after drawing to mark which buffers need resolving
203 * If the depth buffer was written to and if it has an accompanying HiZ
204 * buffer, then mark that it needs a depth resolve.
206 * If the color buffer is a multisample window system buffer, then
207 * mark that it needs a downsample.
209 * Also mark any render targets which will be textured as needing a render
213 iris_postdraw_update_resolve_tracking(struct iris_context
*ice
,
214 struct iris_batch
*batch
)
216 struct pipe_framebuffer_state
*cso_fb
= &ice
->state
.framebuffer
;
217 struct pipe_surface
*zs_surf
= cso_fb
->zsbuf
;
219 // XXX: front buffer drawing?
222 struct iris_resource
*z_res
, *s_res
;
223 iris_get_depth_stencil_resources(zs_surf
->texture
, &z_res
, &s_res
);
224 unsigned num_layers
=
225 zs_surf
->u
.tex
.last_layer
- zs_surf
->u
.tex
.first_layer
+ 1;
228 iris_resource_finish_depth(ice
, z_res
, zs_surf
->u
.tex
.level
,
229 zs_surf
->u
.tex
.first_layer
, num_layers
,
230 ice
->state
.depth_writes_enabled
);
232 if (ice
->state
.depth_writes_enabled
)
233 iris_depth_cache_add_bo(batch
, z_res
->bo
);
237 iris_resource_finish_write(ice
, s_res
, zs_surf
->u
.tex
.level
,
238 zs_surf
->u
.tex
.first_layer
, num_layers
,
241 if (ice
->state
.stencil_writes_enabled
)
242 iris_depth_cache_add_bo(batch
, s_res
->bo
);
246 for (unsigned i
= 0; i
< cso_fb
->nr_cbufs
; i
++) {
247 struct iris_surface
*surf
= (void *) cso_fb
->cbufs
[i
];
251 struct iris_resource
*res
= (void *) surf
->base
.texture
;
252 union pipe_surface_desc
*desc
= &surf
->base
.u
;
253 unsigned num_layers
= desc
->tex
.last_layer
- desc
->tex
.first_layer
+ 1;
254 enum isl_aux_usage aux_usage
= ice
->state
.draw_aux_usage
[i
];
256 iris_render_cache_add_bo(batch
, res
->bo
, surf
->view
.format
, aux_usage
);
258 iris_resource_finish_render(ice
, res
, desc
->tex
.level
,
259 desc
->tex
.first_layer
, num_layers
,
265 * Clear the cache-tracking sets.
268 iris_cache_sets_clear(struct iris_batch
*batch
)
270 hash_table_foreach(batch
->cache
.render
, render_entry
)
271 _mesa_hash_table_remove(batch
->cache
.render
, render_entry
);
273 set_foreach(batch
->cache
.depth
, depth_entry
)
274 _mesa_set_remove(batch
->cache
.depth
, depth_entry
);
278 * Emits an appropriate flush for a BO if it has been rendered to within the
279 * same batchbuffer as a read that's about to be emitted.
281 * The GPU has separate, incoherent caches for the render cache and the
282 * sampler cache, along with other caches. Usually data in the different
283 * caches don't interact (e.g. we don't render to our driver-generated
284 * immediate constant data), but for render-to-texture in FBOs we definitely
285 * do. When a batchbuffer is flushed, the kernel will ensure that everything
286 * necessary is flushed before another use of that BO, but for reuse from
287 * different caches within a batchbuffer, it's all our responsibility.
290 iris_flush_depth_and_render_caches(struct iris_batch
*batch
)
292 iris_emit_pipe_control_flush(batch
,
293 PIPE_CONTROL_DEPTH_CACHE_FLUSH
|
294 PIPE_CONTROL_RENDER_TARGET_FLUSH
|
295 PIPE_CONTROL_CS_STALL
);
297 iris_emit_pipe_control_flush(batch
,
298 PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE
|
299 PIPE_CONTROL_CONST_CACHE_INVALIDATE
);
301 iris_cache_sets_clear(batch
);
305 iris_cache_flush_for_read(struct iris_batch
*batch
,
308 if (_mesa_hash_table_search_pre_hashed(batch
->cache
.render
, bo
->hash
, bo
) ||
309 _mesa_set_search_pre_hashed(batch
->cache
.depth
, bo
->hash
, bo
))
310 iris_flush_depth_and_render_caches(batch
);
314 format_aux_tuple(enum isl_format format
, enum isl_aux_usage aux_usage
)
316 return (void *)(uintptr_t)((uint32_t)format
<< 8 | aux_usage
);
320 iris_cache_flush_for_render(struct iris_batch
*batch
,
322 enum isl_format format
,
323 enum isl_aux_usage aux_usage
)
325 if (_mesa_set_search_pre_hashed(batch
->cache
.depth
, bo
->hash
, bo
))
326 iris_flush_depth_and_render_caches(batch
);
328 /* Check to see if this bo has been used by a previous rendering operation
329 * but with a different format or aux usage. If it has, flush the render
330 * cache so we ensure that it's only in there with one format or aux usage
333 * Even though it's not obvious, this can easily happen in practice.
334 * Suppose a client is blending on a surface with sRGB encode enabled on
335 * gen9. This implies that you get AUX_USAGE_CCS_D at best. If the client
336 * then disables sRGB decode and continues blending we will flip on
337 * AUX_USAGE_CCS_E without doing any sort of resolve in-between (this is
338 * perfectly valid since CCS_E is a subset of CCS_D). However, this means
339 * that we have fragments in-flight which are rendering with UNORM+CCS_E
340 * and other fragments in-flight with SRGB+CCS_D on the same surface at the
341 * same time and the pixel scoreboard and color blender are trying to sort
342 * it all out. This ends badly (i.e. GPU hangs).
344 * To date, we have never observed GPU hangs or even corruption to be
345 * associated with switching the format, only the aux usage. However,
346 * there are comments in various docs which indicate that the render cache
347 * isn't 100% resilient to format changes. We may as well be conservative
348 * and flush on format changes too. We can always relax this later if we
349 * find it to be a performance problem.
351 struct hash_entry
*entry
=
352 _mesa_hash_table_search_pre_hashed(batch
->cache
.render
, bo
->hash
, bo
);
353 if (entry
&& entry
->data
!= format_aux_tuple(format
, aux_usage
))
354 iris_flush_depth_and_render_caches(batch
);
358 iris_render_cache_add_bo(struct iris_batch
*batch
,
360 enum isl_format format
,
361 enum isl_aux_usage aux_usage
)
364 struct hash_entry
*entry
=
365 _mesa_hash_table_search_pre_hashed(batch
->cache
.render
, bo
->hash
, bo
);
367 /* Otherwise, someone didn't do a flush_for_render and that would be
370 assert(entry
->data
== format_aux_tuple(format
, aux_usage
));
374 _mesa_hash_table_insert_pre_hashed(batch
->cache
.render
, bo
->hash
, bo
,
375 format_aux_tuple(format
, aux_usage
));
379 iris_cache_flush_for_depth(struct iris_batch
*batch
,
382 if (_mesa_hash_table_search_pre_hashed(batch
->cache
.render
, bo
->hash
, bo
))
383 iris_flush_depth_and_render_caches(batch
);
387 iris_depth_cache_add_bo(struct iris_batch
*batch
, struct iris_bo
*bo
)
389 _mesa_set_add_pre_hashed(batch
->cache
.depth
, bo
->hash
, bo
);
393 iris_resolve_color(struct iris_context
*ice
,
394 struct iris_batch
*batch
,
395 struct iris_resource
*res
,
396 unsigned level
, unsigned layer
,
397 enum isl_aux_op resolve_op
)
399 //DBG("%s to mt %p level %u layer %u\n", __FUNCTION__, mt, level, layer);
401 struct blorp_surf surf
;
402 iris_blorp_surf_for_resource(&surf
, &res
->base
, res
->aux
.usage
, true);
404 iris_batch_maybe_flush(batch
, 1500);
406 /* Ivybridge PRM Vol 2, Part 1, "11.7 MCS Buffer for Render Target(s)":
408 * "Any transition from any value in {Clear, Render, Resolve} to a
409 * different value in {Clear, Render, Resolve} requires end of pipe
412 * In other words, fast clear ops are not properly synchronized with
413 * other drawing. We need to use a PIPE_CONTROL to ensure that the
414 * contents of the previous draw hit the render target before we resolve
415 * and again afterwards to ensure that the resolve is complete before we
416 * do any more regular drawing.
418 iris_emit_end_of_pipe_sync(batch
, PIPE_CONTROL_RENDER_TARGET_FLUSH
);
420 struct blorp_batch blorp_batch
;
421 blorp_batch_init(&ice
->blorp
, &blorp_batch
, batch
, 0);
422 blorp_ccs_resolve(&blorp_batch
, &surf
, level
, layer
, 1,
423 isl_format_srgb_to_linear(res
->surf
.format
),
425 blorp_batch_finish(&blorp_batch
);
427 /* See comment above */
428 iris_emit_end_of_pipe_sync(batch
, PIPE_CONTROL_RENDER_TARGET_FLUSH
);
432 iris_mcs_partial_resolve(struct iris_context
*ice
,
433 struct iris_batch
*batch
,
434 struct iris_resource
*res
,
435 uint32_t start_layer
,
438 //DBG("%s to mt %p layers %u-%u\n", __FUNCTION__, mt,
439 //start_layer, start_layer + num_layers - 1);
441 assert(res
->aux
.usage
== ISL_AUX_USAGE_MCS
);
443 struct blorp_surf surf
;
444 iris_blorp_surf_for_resource(&surf
, &res
->base
, res
->aux
.usage
, true);
446 struct blorp_batch blorp_batch
;
447 blorp_batch_init(&ice
->blorp
, &blorp_batch
, batch
, 0);
448 blorp_mcs_partial_resolve(&blorp_batch
, &surf
, res
->surf
.format
,
449 start_layer
, num_layers
);
450 blorp_batch_finish(&blorp_batch
);
455 * Return true if the format that will be used to access the resource is
456 * CCS_E-compatible with the resource's linear/non-sRGB format.
458 * Why use the linear format? Well, although the resource may be specified
459 * with an sRGB format, the usage of that color space/format can be toggled.
460 * Since our HW tends to support more linear formats than sRGB ones, we use
461 * this format variant to check for CCS_E compatibility.
464 format_ccs_e_compat_with_resource(const struct gen_device_info
*devinfo
,
465 const struct iris_resource
*res
,
466 enum isl_format access_format
)
468 assert(res
->aux
.usage
== ISL_AUX_USAGE_CCS_E
);
470 enum isl_format isl_format
= isl_format_srgb_to_linear(res
->surf
.format
);
471 return isl_formats_are_ccs_e_compatible(devinfo
, isl_format
, access_format
);
475 sample_with_hiz(const struct gen_device_info
*devinfo
,
476 const struct iris_resource
*res
)
478 if (!devinfo
->has_sample_with_hiz
)
481 if (res
->aux
.usage
!= ISL_AUX_USAGE_HIZ
)
484 /* It seems the hardware won't fallback to the depth buffer if some of the
485 * mipmap levels aren't available in the HiZ buffer. So we need all levels
486 * of the texture to be HiZ enabled.
488 for (unsigned level
= 0; level
< res
->surf
.levels
; ++level
) {
489 if (!iris_resource_level_has_hiz(res
, level
))
493 /* If compressed multisampling is enabled, then we use it for the auxiliary
496 * From the BDW PRM (Volume 2d: Command Reference: Structures
497 * RENDER_SURFACE_STATE.AuxiliarySurfaceMode):
499 * "If this field is set to AUX_HIZ, Number of Multisamples must be
500 * MULTISAMPLECOUNT_1, and Surface Type cannot be SURFTYPE_3D.
502 * There is no such blurb for 1D textures, but there is sufficient evidence
503 * that this is broken on SKL+.
505 // XXX: i965 disables this for arrays too, is that reasonable?
506 return res
->surf
.samples
== 1 && res
->surf
.dim
== ISL_SURF_DIM_2D
;
510 * Does the resource's slice have hiz enabled?
bool
iris_resource_level_has_hiz(const struct iris_resource *res, uint32_t level)
{
   iris_resource_check_level_layer(res, level, 0);
   /* Per-level HiZ tracking isn't wired up yet; report "no HiZ".
    * NOTE(review): the return statement was missing from the truncated
    * source — `false` matches the commented-out field being unavailable.
    */
   // return res->level[level].has_hiz;
   return false;
}
520 /** \brief Assert that the level and layer are valid for the resource. */
522 iris_resource_check_level_layer(UNUSED
const struct iris_resource
*res
,
523 UNUSED
uint32_t level
, UNUSED
uint32_t layer
)
525 assert(level
< res
->surf
.levels
);
526 assert(layer
< util_num_layers(&res
->base
, level
));
529 static inline uint32_t
530 miptree_level_range_length(const struct iris_resource
*res
,
531 uint32_t start_level
, uint32_t num_levels
)
533 assert(start_level
< res
->surf
.levels
);
535 if (num_levels
== INTEL_REMAINING_LAYERS
)
536 num_levels
= res
->surf
.levels
;
538 /* Check for overflow */
539 assert(start_level
+ num_levels
>= start_level
);
540 assert(start_level
+ num_levels
<= res
->surf
.levels
);
545 static inline uint32_t
546 miptree_layer_range_length(const struct iris_resource
*res
, uint32_t level
,
547 uint32_t start_layer
, uint32_t num_layers
)
549 assert(level
<= res
->base
.last_level
);
551 const uint32_t total_num_layers
= iris_get_num_logical_layers(res
, level
);
552 assert(start_layer
< total_num_layers
);
553 if (num_layers
== INTEL_REMAINING_LAYERS
)
554 num_layers
= total_num_layers
- start_layer
;
555 /* Check for overflow */
556 assert(start_layer
+ num_layers
>= start_layer
);
557 assert(start_layer
+ num_layers
<= total_num_layers
);
563 has_color_unresolved(const struct iris_resource
*res
,
564 unsigned start_level
, unsigned num_levels
,
565 unsigned start_layer
, unsigned num_layers
)
570 /* Clamp the level range to fit the resource */
571 num_levels
= miptree_level_range_length(res
, start_level
, num_levels
);
573 for (uint32_t l
= 0; l
< num_levels
; l
++) {
574 const uint32_t level
= start_level
+ l
;
575 const uint32_t level_layers
=
576 miptree_layer_range_length(res
, level
, start_layer
, num_layers
);
577 for (unsigned a
= 0; a
< level_layers
; a
++) {
578 enum isl_aux_state aux_state
=
579 iris_resource_get_aux_state(res
, level
, start_layer
+ a
);
580 assert(aux_state
!= ISL_AUX_STATE_AUX_INVALID
);
581 if (aux_state
!= ISL_AUX_STATE_PASS_THROUGH
)
589 static enum isl_aux_op
590 get_ccs_d_resolve_op(enum isl_aux_state aux_state
,
591 enum isl_aux_usage aux_usage
,
592 bool fast_clear_supported
)
594 assert(aux_usage
== ISL_AUX_USAGE_NONE
|| aux_usage
== ISL_AUX_USAGE_CCS_D
);
596 const bool ccs_supported
= aux_usage
== ISL_AUX_USAGE_CCS_D
;
598 assert(ccs_supported
== fast_clear_supported
);
601 case ISL_AUX_STATE_CLEAR
:
602 case ISL_AUX_STATE_PARTIAL_CLEAR
:
604 return ISL_AUX_OP_FULL_RESOLVE
;
606 return ISL_AUX_OP_NONE
;
608 case ISL_AUX_STATE_PASS_THROUGH
:
609 return ISL_AUX_OP_NONE
;
611 case ISL_AUX_STATE_RESOLVED
:
612 case ISL_AUX_STATE_AUX_INVALID
:
613 case ISL_AUX_STATE_COMPRESSED_CLEAR
:
614 case ISL_AUX_STATE_COMPRESSED_NO_CLEAR
:
618 unreachable("Invalid aux state for CCS_D");
621 static enum isl_aux_op
622 get_ccs_e_resolve_op(enum isl_aux_state aux_state
,
623 enum isl_aux_usage aux_usage
,
624 bool fast_clear_supported
)
626 /* CCS_E surfaces can be accessed as CCS_D if we're careful. */
627 assert(aux_usage
== ISL_AUX_USAGE_NONE
||
628 aux_usage
== ISL_AUX_USAGE_CCS_D
||
629 aux_usage
== ISL_AUX_USAGE_CCS_E
);
631 if (aux_usage
== ISL_AUX_USAGE_CCS_D
)
632 assert(fast_clear_supported
);
635 case ISL_AUX_STATE_CLEAR
:
636 case ISL_AUX_STATE_PARTIAL_CLEAR
:
637 if (fast_clear_supported
)
638 return ISL_AUX_OP_NONE
;
639 else if (aux_usage
== ISL_AUX_USAGE_CCS_E
)
640 return ISL_AUX_OP_PARTIAL_RESOLVE
;
642 return ISL_AUX_OP_FULL_RESOLVE
;
644 case ISL_AUX_STATE_COMPRESSED_CLEAR
:
645 if (aux_usage
!= ISL_AUX_USAGE_CCS_E
)
646 return ISL_AUX_OP_FULL_RESOLVE
;
647 else if (!fast_clear_supported
)
648 return ISL_AUX_OP_PARTIAL_RESOLVE
;
650 return ISL_AUX_OP_NONE
;
652 case ISL_AUX_STATE_COMPRESSED_NO_CLEAR
:
653 if (aux_usage
!= ISL_AUX_USAGE_CCS_E
)
654 return ISL_AUX_OP_FULL_RESOLVE
;
656 return ISL_AUX_OP_NONE
;
658 case ISL_AUX_STATE_PASS_THROUGH
:
659 return ISL_AUX_OP_NONE
;
661 case ISL_AUX_STATE_RESOLVED
:
662 case ISL_AUX_STATE_AUX_INVALID
:
666 unreachable("Invalid aux state for CCS_E");
670 iris_resource_prepare_ccs_access(struct iris_context
*ice
,
671 struct iris_batch
*batch
,
672 struct iris_resource
*res
,
673 uint32_t level
, uint32_t layer
,
674 enum isl_aux_usage aux_usage
,
675 bool fast_clear_supported
)
677 enum isl_aux_state aux_state
= iris_resource_get_aux_state(res
, level
, layer
);
679 enum isl_aux_op resolve_op
;
680 if (res
->aux
.usage
== ISL_AUX_USAGE_CCS_E
) {
681 resolve_op
= get_ccs_e_resolve_op(aux_state
, aux_usage
,
682 fast_clear_supported
);
684 assert(res
->aux
.usage
== ISL_AUX_USAGE_CCS_D
);
685 resolve_op
= get_ccs_d_resolve_op(aux_state
, aux_usage
,
686 fast_clear_supported
);
689 if (resolve_op
!= ISL_AUX_OP_NONE
) {
690 iris_resolve_color(ice
, batch
, res
, level
, layer
, resolve_op
);
692 switch (resolve_op
) {
693 case ISL_AUX_OP_FULL_RESOLVE
:
694 /* The CCS full resolve operation destroys the CCS and sets it to the
695 * pass-through state. (You can also think of this as being both a
696 * resolve and an ambiguate in one operation.)
698 iris_resource_set_aux_state(res
, level
, layer
, 1,
699 ISL_AUX_STATE_PASS_THROUGH
);
702 case ISL_AUX_OP_PARTIAL_RESOLVE
:
703 iris_resource_set_aux_state(res
, level
, layer
, 1,
704 ISL_AUX_STATE_COMPRESSED_NO_CLEAR
);
708 unreachable("Invalid resolve op");
714 iris_resource_finish_ccs_write(struct iris_context
*ice
,
715 struct iris_resource
*res
,
716 uint32_t level
, uint32_t layer
,
717 enum isl_aux_usage aux_usage
)
719 assert(aux_usage
== ISL_AUX_USAGE_NONE
||
720 aux_usage
== ISL_AUX_USAGE_CCS_D
||
721 aux_usage
== ISL_AUX_USAGE_CCS_E
);
723 enum isl_aux_state aux_state
=
724 iris_resource_get_aux_state(res
, level
, layer
);
726 if (res
->aux
.usage
== ISL_AUX_USAGE_CCS_E
) {
728 case ISL_AUX_STATE_CLEAR
:
729 case ISL_AUX_STATE_PARTIAL_CLEAR
:
730 assert(aux_usage
== ISL_AUX_USAGE_CCS_E
||
731 aux_usage
== ISL_AUX_USAGE_CCS_D
);
733 if (aux_usage
== ISL_AUX_USAGE_CCS_E
) {
734 iris_resource_set_aux_state(res
, level
, layer
, 1,
735 ISL_AUX_STATE_COMPRESSED_CLEAR
);
736 } else if (aux_state
!= ISL_AUX_STATE_PARTIAL_CLEAR
) {
737 iris_resource_set_aux_state(res
, level
, layer
, 1,
738 ISL_AUX_STATE_PARTIAL_CLEAR
);
742 case ISL_AUX_STATE_COMPRESSED_CLEAR
:
743 case ISL_AUX_STATE_COMPRESSED_NO_CLEAR
:
744 assert(aux_usage
== ISL_AUX_USAGE_CCS_E
);
745 break; /* Nothing to do */
747 case ISL_AUX_STATE_PASS_THROUGH
:
748 if (aux_usage
== ISL_AUX_USAGE_CCS_E
) {
749 iris_resource_set_aux_state(res
, level
, layer
, 1,
750 ISL_AUX_STATE_COMPRESSED_NO_CLEAR
);
756 case ISL_AUX_STATE_RESOLVED
:
757 case ISL_AUX_STATE_AUX_INVALID
:
758 unreachable("Invalid aux state for CCS_E");
761 assert(res
->aux
.usage
== ISL_AUX_USAGE_CCS_D
);
762 /* CCS_D is a bit simpler */
764 case ISL_AUX_STATE_CLEAR
:
765 assert(aux_usage
== ISL_AUX_USAGE_CCS_D
);
766 iris_resource_set_aux_state(res
, level
, layer
, 1,
767 ISL_AUX_STATE_PARTIAL_CLEAR
);
770 case ISL_AUX_STATE_PARTIAL_CLEAR
:
771 assert(aux_usage
== ISL_AUX_USAGE_CCS_D
);
772 break; /* Nothing to do */
774 case ISL_AUX_STATE_PASS_THROUGH
:
778 case ISL_AUX_STATE_COMPRESSED_CLEAR
:
779 case ISL_AUX_STATE_COMPRESSED_NO_CLEAR
:
780 case ISL_AUX_STATE_RESOLVED
:
781 case ISL_AUX_STATE_AUX_INVALID
:
782 unreachable("Invalid aux state for CCS_D");
788 iris_resource_prepare_mcs_access(struct iris_context
*ice
,
789 struct iris_batch
*batch
,
790 struct iris_resource
*res
,
792 enum isl_aux_usage aux_usage
,
793 bool fast_clear_supported
)
795 assert(aux_usage
== ISL_AUX_USAGE_MCS
);
797 switch (iris_resource_get_aux_state(res
, 0, layer
)) {
798 case ISL_AUX_STATE_CLEAR
:
799 case ISL_AUX_STATE_COMPRESSED_CLEAR
:
800 if (!fast_clear_supported
) {
801 iris_mcs_partial_resolve(ice
, batch
, res
, layer
, 1);
802 iris_resource_set_aux_state(res
, 0, layer
, 1,
803 ISL_AUX_STATE_COMPRESSED_NO_CLEAR
);
807 case ISL_AUX_STATE_COMPRESSED_NO_CLEAR
:
808 break; /* Nothing to do */
810 case ISL_AUX_STATE_RESOLVED
:
811 case ISL_AUX_STATE_PASS_THROUGH
:
812 case ISL_AUX_STATE_AUX_INVALID
:
813 case ISL_AUX_STATE_PARTIAL_CLEAR
:
814 unreachable("Invalid aux state for MCS");
819 iris_resource_finish_mcs_write(struct iris_context
*ice
,
820 struct iris_resource
*res
,
822 enum isl_aux_usage aux_usage
)
824 assert(aux_usage
== ISL_AUX_USAGE_MCS
);
826 switch (iris_resource_get_aux_state(res
, 0, layer
)) {
827 case ISL_AUX_STATE_CLEAR
:
828 iris_resource_set_aux_state(res
, 0, layer
, 1,
829 ISL_AUX_STATE_COMPRESSED_CLEAR
);
832 case ISL_AUX_STATE_COMPRESSED_CLEAR
:
833 case ISL_AUX_STATE_COMPRESSED_NO_CLEAR
:
834 break; /* Nothing to do */
836 case ISL_AUX_STATE_RESOLVED
:
837 case ISL_AUX_STATE_PASS_THROUGH
:
838 case ISL_AUX_STATE_AUX_INVALID
:
839 case ISL_AUX_STATE_PARTIAL_CLEAR
:
840 unreachable("Invalid aux state for MCS");
845 iris_resource_prepare_hiz_access(struct iris_context
*ice
,
846 struct iris_batch
*batch
,
847 struct iris_resource
*res
,
848 uint32_t level
, uint32_t layer
,
849 enum isl_aux_usage aux_usage
,
850 bool fast_clear_supported
)
852 assert(aux_usage
== ISL_AUX_USAGE_NONE
|| aux_usage
== ISL_AUX_USAGE_HIZ
);
854 enum isl_aux_op hiz_op
= ISL_AUX_OP_NONE
;
855 switch (iris_resource_get_aux_state(res
, level
, layer
)) {
856 case ISL_AUX_STATE_CLEAR
:
857 case ISL_AUX_STATE_COMPRESSED_CLEAR
:
858 if (aux_usage
!= ISL_AUX_USAGE_HIZ
|| !fast_clear_supported
)
859 hiz_op
= ISL_AUX_OP_FULL_RESOLVE
;
862 case ISL_AUX_STATE_COMPRESSED_NO_CLEAR
:
863 if (aux_usage
!= ISL_AUX_USAGE_HIZ
)
864 hiz_op
= ISL_AUX_OP_FULL_RESOLVE
;
867 case ISL_AUX_STATE_PASS_THROUGH
:
868 case ISL_AUX_STATE_RESOLVED
:
871 case ISL_AUX_STATE_AUX_INVALID
:
872 if (aux_usage
== ISL_AUX_USAGE_HIZ
)
873 hiz_op
= ISL_AUX_OP_AMBIGUATE
;
876 case ISL_AUX_STATE_PARTIAL_CLEAR
:
877 unreachable("Invalid HiZ state");
880 if (hiz_op
!= ISL_AUX_OP_NONE
) {
882 //intel_hiz_exec(ice, res, level, layer, 1, hiz_op);
885 case ISL_AUX_OP_FULL_RESOLVE
:
886 iris_resource_set_aux_state(res
, level
, layer
, 1,
887 ISL_AUX_STATE_RESOLVED
);
890 case ISL_AUX_OP_AMBIGUATE
:
891 /* The HiZ resolve operation is actually an ambiguate */
892 iris_resource_set_aux_state(res
, level
, layer
, 1,
893 ISL_AUX_STATE_PASS_THROUGH
);
897 unreachable("Invalid HiZ op");
903 iris_resource_finish_hiz_write(struct iris_context
*ice
,
904 struct iris_resource
*res
,
905 uint32_t level
, uint32_t layer
,
906 enum isl_aux_usage aux_usage
)
908 assert(aux_usage
== ISL_AUX_USAGE_NONE
|| aux_usage
== ISL_AUX_USAGE_HIZ
);
910 switch (iris_resource_get_aux_state(res
, level
, layer
)) {
911 case ISL_AUX_STATE_CLEAR
:
912 assert(aux_usage
== ISL_AUX_USAGE_HIZ
);
913 iris_resource_set_aux_state(res
, level
, layer
, 1,
914 ISL_AUX_STATE_COMPRESSED_CLEAR
);
917 case ISL_AUX_STATE_COMPRESSED_NO_CLEAR
:
918 case ISL_AUX_STATE_COMPRESSED_CLEAR
:
919 assert(aux_usage
== ISL_AUX_USAGE_HIZ
);
920 break; /* Nothing to do */
922 case ISL_AUX_STATE_RESOLVED
:
923 if (aux_usage
== ISL_AUX_USAGE_HIZ
) {
924 iris_resource_set_aux_state(res
, level
, layer
, 1,
925 ISL_AUX_STATE_COMPRESSED_NO_CLEAR
);
927 iris_resource_set_aux_state(res
, level
, layer
, 1,
928 ISL_AUX_STATE_AUX_INVALID
);
932 case ISL_AUX_STATE_PASS_THROUGH
:
933 if (aux_usage
== ISL_AUX_USAGE_HIZ
) {
934 iris_resource_set_aux_state(res
, level
, layer
, 1,
935 ISL_AUX_STATE_COMPRESSED_NO_CLEAR
);
939 case ISL_AUX_STATE_AUX_INVALID
:
940 assert(aux_usage
!= ISL_AUX_USAGE_HIZ
);
943 case ISL_AUX_STATE_PARTIAL_CLEAR
:
944 unreachable("Invalid HiZ state");
949 iris_resource_prepare_access(struct iris_context
*ice
,
950 struct iris_batch
*batch
,
951 struct iris_resource
*res
,
952 uint32_t start_level
, uint32_t num_levels
,
953 uint32_t start_layer
, uint32_t num_layers
,
954 enum isl_aux_usage aux_usage
,
955 bool fast_clear_supported
)
957 num_levels
= miptree_level_range_length(res
, start_level
, num_levels
);
959 switch (res
->aux
.usage
) {
960 case ISL_AUX_USAGE_NONE
:
964 case ISL_AUX_USAGE_MCS
:
965 assert(start_level
== 0 && num_levels
== 1);
966 const uint32_t level_layers
=
967 miptree_layer_range_length(res
, 0, start_layer
, num_layers
);
968 for (uint32_t a
= 0; a
< level_layers
; a
++) {
969 iris_resource_prepare_mcs_access(ice
, batch
, res
, start_layer
+ a
,
970 aux_usage
, fast_clear_supported
);
974 case ISL_AUX_USAGE_CCS_D
:
975 case ISL_AUX_USAGE_CCS_E
:
976 for (uint32_t l
= 0; l
< num_levels
; l
++) {
977 const uint32_t level
= start_level
+ l
;
978 const uint32_t level_layers
=
979 miptree_layer_range_length(res
, level
, start_layer
, num_layers
);
980 for (uint32_t a
= 0; a
< level_layers
; a
++) {
981 iris_resource_prepare_ccs_access(ice
, batch
, res
, level
,
983 aux_usage
, fast_clear_supported
);
988 case ISL_AUX_USAGE_HIZ
:
989 for (uint32_t l
= 0; l
< num_levels
; l
++) {
990 const uint32_t level
= start_level
+ l
;
991 if (!iris_resource_level_has_hiz(res
, level
))
994 const uint32_t level_layers
=
995 miptree_layer_range_length(res
, level
, start_layer
, num_layers
);
996 for (uint32_t a
= 0; a
< level_layers
; a
++) {
997 iris_resource_prepare_hiz_access(ice
, batch
, res
, level
,
998 start_layer
+ a
, aux_usage
,
999 fast_clear_supported
);
1005 unreachable("Invalid aux usage");
1010 iris_resource_finish_write(struct iris_context
*ice
,
1011 struct iris_resource
*res
, uint32_t level
,
1012 uint32_t start_layer
, uint32_t num_layers
,
1013 enum isl_aux_usage aux_usage
)
1015 num_layers
= miptree_layer_range_length(res
, level
, start_layer
, num_layers
);
1017 switch (res
->aux
.usage
) {
1018 case ISL_AUX_USAGE_NONE
:
1021 case ISL_AUX_USAGE_MCS
:
1022 for (uint32_t a
= 0; a
< num_layers
; a
++) {
1023 iris_resource_finish_mcs_write(ice
, res
, start_layer
+ a
,
1028 case ISL_AUX_USAGE_CCS_D
:
1029 case ISL_AUX_USAGE_CCS_E
:
1030 for (uint32_t a
= 0; a
< num_layers
; a
++) {
1031 iris_resource_finish_ccs_write(ice
, res
, level
, start_layer
+ a
,
1036 case ISL_AUX_USAGE_HIZ
:
1037 if (!iris_resource_level_has_hiz(res
, level
))
1040 for (uint32_t a
= 0; a
< num_layers
; a
++) {
1041 iris_resource_finish_hiz_write(ice
, res
, level
, start_layer
+ a
,
1047 unreachable("Invavlid aux usage");
1052 iris_resource_get_aux_state(const struct iris_resource
*res
,
1053 uint32_t level
, uint32_t layer
)
1055 iris_resource_check_level_layer(res
, level
, layer
);
1057 if (res
->surf
.usage
& ISL_SURF_USAGE_DEPTH_BIT
) {
1058 assert(iris_resource_level_has_hiz(res
, level
));
1059 } else if (res
->surf
.usage
& ISL_SURF_USAGE_STENCIL_BIT
) {
1060 unreachable("Cannot get aux state for stencil");
1062 assert(res
->surf
.samples
== 1 ||
1063 res
->surf
.msaa_layout
== ISL_MSAA_LAYOUT_ARRAY
);
1066 return res
->aux
.state
[level
][layer
];
1070 iris_resource_set_aux_state(struct iris_resource
*res
, uint32_t level
,
1071 uint32_t start_layer
, uint32_t num_layers
,
1072 enum isl_aux_state aux_state
)
1074 num_layers
= miptree_layer_range_length(res
, level
, start_layer
, num_layers
);
1076 if (res
->surf
.usage
& ISL_SURF_USAGE_DEPTH_BIT
) {
1077 assert(iris_resource_level_has_hiz(res
, level
));
1078 } else if (res
->surf
.usage
& ISL_SURF_USAGE_STENCIL_BIT
) {
1079 unreachable("Cannot set aux state for stencil");
1081 assert(res
->surf
.samples
== 1 ||
1082 res
->surf
.msaa_layout
== ISL_MSAA_LAYOUT_ARRAY
);
1085 for (unsigned a
= 0; a
< num_layers
; a
++) {
1086 if (res
->aux
.state
[level
][start_layer
+ a
] != aux_state
) {
1087 res
->aux
.state
[level
][start_layer
+ a
] = aux_state
;
1088 // XXX: dirty works differently
1089 // brw->ctx.NewDriverState |= BRW_NEW_AUX_STATE;
1094 /* On Gen9 color buffers may be compressed by the hardware (lossless
1095 * compression). There are, however, format restrictions and care needs to be
1096 * taken that the sampler engine is capable for re-interpreting a buffer with
1097 * format different the buffer was originally written with.
1099 * For example, SRGB formats are not compressible and the sampler engine isn't
1100 * capable of treating RGBA_UNORM as SRGB_ALPHA. In such a case the underlying
1101 * color buffer needs to be resolved so that the sampling surface can be
1102 * sampled as non-compressed (i.e., without the auxiliary MCS buffer being
1106 can_texture_with_ccs(const struct gen_device_info
*devinfo
,
1107 struct pipe_debug_callback
*dbg
,
1108 const struct iris_resource
*res
,
1109 enum isl_format view_format
)
1111 if (res
->aux
.usage
!= ISL_AUX_USAGE_CCS_E
)
1114 if (!format_ccs_e_compat_with_resource(devinfo
, res
, view_format
)) {
1115 const struct isl_format_layout
*res_fmtl
=
1116 isl_format_get_layout(res
->surf
.format
);
1117 const struct isl_format_layout
*view_fmtl
=
1118 isl_format_get_layout(view_format
);
1120 perf_debug(dbg
, "Incompatible sampling format (%s) for CCS (%s)\n",
1121 view_fmtl
->name
, res_fmtl
->name
);
1130 iris_resource_texture_aux_usage(struct iris_context
*ice
,
1131 const struct iris_resource
*res
,
1132 enum isl_format view_format
,
1133 enum gen9_astc5x5_wa_tex_type astc5x5_wa_bits
)
1135 struct iris_screen
*screen
= (void *) ice
->ctx
.screen
;
1136 struct gen_device_info
*devinfo
= &screen
->devinfo
;
1138 assert(devinfo
->gen
== 9 || astc5x5_wa_bits
== 0);
1140 /* On gen9, ASTC 5x5 textures cannot live in the sampler cache along side
1141 * CCS or HiZ compressed textures. See gen9_apply_astc5x5_wa_flush() for
1144 if ((astc5x5_wa_bits
& GEN9_ASTC5X5_WA_TEX_TYPE_ASTC5x5
) &&
1145 res
->aux
.usage
!= ISL_AUX_USAGE_MCS
)
1146 return ISL_AUX_USAGE_NONE
;
1148 switch (res
->aux
.usage
) {
1149 case ISL_AUX_USAGE_HIZ
:
1150 if (sample_with_hiz(devinfo
, res
))
1151 return ISL_AUX_USAGE_HIZ
;
1154 case ISL_AUX_USAGE_MCS
:
1155 return ISL_AUX_USAGE_MCS
;
1157 case ISL_AUX_USAGE_CCS_D
:
1158 case ISL_AUX_USAGE_CCS_E
:
1159 /* If we don't have any unresolved color, report an aux usage of
1160 * ISL_AUX_USAGE_NONE. This way, texturing won't even look at the
1161 * aux surface and we can save some bandwidth.
1163 if (!has_color_unresolved(res
, 0, INTEL_REMAINING_LEVELS
,
1164 0, INTEL_REMAINING_LAYERS
))
1165 return ISL_AUX_USAGE_NONE
;
1167 if (can_texture_with_ccs(devinfo
, &ice
->dbg
, res
, view_format
))
1168 return ISL_AUX_USAGE_CCS_E
;
1175 return ISL_AUX_USAGE_NONE
;
1179 isl_formats_are_fast_clear_compatible(enum isl_format a
, enum isl_format b
)
1181 /* On gen8 and earlier, the hardware was only capable of handling 0/1 clear
1182 * values so sRGB curve application was a no-op for all fast-clearable
1185 * On gen9+, the hardware supports arbitrary clear values. For sRGB clear
1186 * values, the hardware interprets the floats, not as what would be
1187 * returned from the sampler (or written by the shader), but as being
1188 * between format conversion and sRGB curve application. This means that
1189 * we can switch between sRGB and UNORM without having to whack the clear
1192 return isl_format_srgb_to_linear(a
) == isl_format_srgb_to_linear(b
);
1196 iris_resource_prepare_texture(struct iris_context
*ice
,
1197 struct iris_batch
*batch
,
1198 struct iris_resource
*res
,
1199 enum isl_format view_format
,
1200 uint32_t start_level
, uint32_t num_levels
,
1201 uint32_t start_layer
, uint32_t num_layers
,
1202 enum gen9_astc5x5_wa_tex_type astc5x5_wa_bits
)
1204 enum isl_aux_usage aux_usage
=
1205 iris_resource_texture_aux_usage(ice
, res
, view_format
, astc5x5_wa_bits
);
1207 bool clear_supported
= aux_usage
!= ISL_AUX_USAGE_NONE
;
1209 /* Clear color is specified as ints or floats and the conversion is done by
1210 * the sampler. If we have a texture view, we would have to perform the
1211 * clear color conversion manually. Just disable clear color.
1213 if (!isl_formats_are_fast_clear_compatible(res
->surf
.format
, view_format
))
1214 clear_supported
= false;
1216 iris_resource_prepare_access(ice
, batch
, res
, start_level
, num_levels
,
1217 start_layer
, num_layers
,
1218 aux_usage
, clear_supported
);
/**
 * Prepare a resource for access as a shader image: fully resolve every
 * level and layer, since the data port cannot read or write through any
 * auxiliary compression surface.
 */
void
iris_resource_prepare_image(struct iris_context *ice,
                            struct iris_batch *batch,
                            struct iris_resource *res)
{
   /* The data port doesn't understand any compression */
   iris_resource_prepare_access(ice, batch, res, 0, INTEL_REMAINING_LEVELS,
                                0, INTEL_REMAINING_LAYERS,
                                ISL_AUX_USAGE_NONE, false);
}
1233 iris_resource_render_aux_usage(struct iris_context
*ice
,
1234 struct iris_resource
*res
,
1235 enum isl_format render_format
,
1237 bool draw_aux_disabled
)
1239 struct iris_screen
*screen
= (void *) ice
->ctx
.screen
;
1240 struct gen_device_info
*devinfo
= &screen
->devinfo
;
1242 if (draw_aux_disabled
)
1243 return ISL_AUX_USAGE_NONE
;
1245 switch (res
->aux
.usage
) {
1246 case ISL_AUX_USAGE_MCS
:
1247 return ISL_AUX_USAGE_MCS
;
1249 case ISL_AUX_USAGE_CCS_D
:
1250 case ISL_AUX_USAGE_CCS_E
:
1251 /* Gen9+ hardware technically supports non-0/1 clear colors with sRGB
1252 * formats. However, there are issues with blending where it doesn't
1253 * properly apply the sRGB curve to the clear color when blending.
1256 if (devinfo->gen >= 9 && blend_enabled &&
1257 isl_format_is_srgb(render_format) &&
1258 !isl_color_value_is_zero_one(res->fast_clear_color, render_format))
1259 return ISL_AUX_USAGE_NONE;
1262 if (res
->aux
.usage
== ISL_AUX_USAGE_CCS_E
&&
1263 format_ccs_e_compat_with_resource(devinfo
, res
, render_format
))
1264 return ISL_AUX_USAGE_CCS_E
;
1266 /* Otherwise, we have to fall back to CCS_D */
1267 return ISL_AUX_USAGE_CCS_D
;
1270 return ISL_AUX_USAGE_NONE
;
1275 iris_resource_prepare_render(struct iris_context
*ice
,
1276 struct iris_batch
*batch
,
1277 struct iris_resource
*res
, uint32_t level
,
1278 uint32_t start_layer
, uint32_t layer_count
,
1279 enum isl_aux_usage aux_usage
)
1281 iris_resource_prepare_access(ice
, batch
, res
, level
, 1, start_layer
,
1282 layer_count
, aux_usage
,
1283 aux_usage
!= ISL_AUX_USAGE_NONE
);
/**
 * Record that a level/layer range was written by the render pipeline with
 * the given aux usage, updating the resolve-tracking state.
 */
void
iris_resource_finish_render(struct iris_context *ice,
                            struct iris_resource *res, uint32_t level,
                            uint32_t start_layer, uint32_t layer_count,
                            enum isl_aux_usage aux_usage)
{
   iris_resource_finish_write(ice, res, level, start_layer, layer_count,
                              aux_usage);
}
1297 iris_resource_prepare_depth(struct iris_context
*ice
,
1298 struct iris_batch
*batch
,
1299 struct iris_resource
*res
, uint32_t level
,
1300 uint32_t start_layer
, uint32_t layer_count
)
1302 iris_resource_prepare_access(ice
, batch
, res
, level
, 1, start_layer
,
1303 layer_count
, res
->aux
.usage
, !!res
->aux
.bo
);
1307 iris_resource_finish_depth(struct iris_context
*ice
,
1308 struct iris_resource
*res
, uint32_t level
,
1309 uint32_t start_layer
, uint32_t layer_count
,
1312 if (depth_written
) {
1313 iris_resource_finish_write(ice
, res
, level
, start_layer
, layer_count
,