iris: Fix aux usage in render resolve code
[mesa.git] / src / gallium / drivers / iris / iris_resolve.c
/*
 * Copyright © 2017 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/**
 * @file iris_resolve.c
 *
 * This file handles resolve tracking for main and auxiliary surfaces.
 *
 * It also handles our cache tracking. We have sets for the render cache,
 * depth cache, and so on. If a BO is in a cache's set, then it may have
 * data in that cache. The helpers take care of emitting flushes for
 * render-to-texture, format reinterpretation issues, and other situations.
 */

#include "util/hash_table.h"
#include "util/set.h"
#include "iris_context.h"

/**
 * Disable auxiliary buffers if a renderbuffer is also bound as a texture
 * or shader image. This causes a self-dependency, where both rendering
 * and sampling may concurrently read or write the CCS buffer, causing
 * incorrect pixels.
 */
static bool
disable_rb_aux_buffer(struct iris_context *ice,
                      bool *draw_aux_buffer_disabled,
                      struct iris_resource *tex_res,
                      unsigned min_level, unsigned num_levels,
                      const char *usage)
{
   struct pipe_framebuffer_state *cso_fb = &ice->state.framebuffer;
   bool found = false;

   /* We only need to worry about color compression and fast clears. */
   if (tex_res->aux.usage != ISL_AUX_USAGE_CCS_D &&
       tex_res->aux.usage != ISL_AUX_USAGE_CCS_E)
      return false;

   for (unsigned i = 0; i < cso_fb->nr_cbufs; i++) {
      struct iris_surface *surf = (void *) cso_fb->cbufs[i];
      if (!surf)
         continue;

      struct iris_resource *rb_res = (void *) surf->base.texture;

      if (rb_res->bo == tex_res->bo &&
          surf->base.u.tex.level >= min_level &&
          surf->base.u.tex.level < min_level + num_levels) {
         found = draw_aux_buffer_disabled[i] = true;
      }
   }

   if (found) {
      perf_debug(&ice->dbg,
                 "Disabling CCS because a renderbuffer is also bound %s.\n",
                 usage);
   }

   return found;
}

static void
resolve_sampler_views(struct iris_context *ice,
                      struct iris_batch *batch,
                      struct iris_shader_state *shs,
                      bool *draw_aux_buffer_disabled,
                      bool consider_framebuffer)
{
   uint32_t views = shs->bound_sampler_views;

   unsigned astc5x5_wa_bits = 0; // XXX: actual tracking

   while (views) {
      const int i = u_bit_scan(&views);
      struct iris_sampler_view *isv = shs->textures[i];
      struct iris_resource *res = (void *) isv->base.texture;

      if (res->base.target != PIPE_BUFFER) {
         if (consider_framebuffer) {
            disable_rb_aux_buffer(ice, draw_aux_buffer_disabled,
                                  res, isv->view.base_level, isv->view.levels,
                                  "for sampling");
         }

         iris_resource_prepare_texture(ice, batch, res, isv->view.format,
                                       isv->view.base_level, isv->view.levels,
                                       isv->view.base_array_layer,
                                       isv->view.array_len,
                                       astc5x5_wa_bits);
      }

      iris_cache_flush_for_read(batch, res->bo);
   }
}

static void
resolve_image_views(struct iris_context *ice,
                    struct iris_batch *batch,
                    struct iris_shader_state *shs,
                    bool *draw_aux_buffer_disabled,
                    bool consider_framebuffer)
{
   uint32_t views = shs->bound_image_views;

   while (views) {
      const int i = u_bit_scan(&views);
      struct iris_resource *res = (void *) shs->image[i].res;

      if (res->base.target != PIPE_BUFFER) {
         if (consider_framebuffer) {
            disable_rb_aux_buffer(ice, draw_aux_buffer_disabled,
                                  res, 0, ~0, "as a shader image");
         }

         iris_resource_prepare_image(ice, batch, res);
      }

      iris_cache_flush_for_read(batch, res->bo);
   }
}


/**
 * \brief Resolve buffers before drawing.
 *
 * Resolve the depth buffer's HiZ buffer, resolve the depth buffer of each
 * enabled depth texture, and flush the render cache for any dirty textures.
 */
void
iris_predraw_resolve_inputs(struct iris_context *ice,
                            struct iris_batch *batch,
                            struct iris_shader_state *shs,
                            bool *draw_aux_buffer_disabled,
                            bool consider_framebuffer)
{
   resolve_sampler_views(ice, batch, shs, draw_aux_buffer_disabled,
                         consider_framebuffer);
   resolve_image_views(ice, batch, shs, draw_aux_buffer_disabled,
                       consider_framebuffer);

   // XXX: ASTC hacks
}

void
iris_predraw_resolve_framebuffer(struct iris_context *ice,
                                 struct iris_batch *batch,
                                 bool *draw_aux_buffer_disabled)
{
   struct pipe_framebuffer_state *cso_fb = &ice->state.framebuffer;
   struct pipe_surface *zs_surf = cso_fb->zsbuf;

   if (zs_surf) {
      struct iris_resource *z_res, *s_res;
      iris_get_depth_stencil_resources(zs_surf->texture, &z_res, &s_res);
      unsigned num_layers =
         zs_surf->u.tex.last_layer - zs_surf->u.tex.first_layer + 1;

      if (z_res) {
         iris_resource_prepare_depth(ice, batch, z_res, zs_surf->u.tex.level,
                                     zs_surf->u.tex.first_layer, num_layers);
      }
   }

   for (unsigned i = 0; i < cso_fb->nr_cbufs; i++) {
      struct iris_surface *surf = (void *) cso_fb->cbufs[i];
      if (!surf)
         continue;

      struct iris_resource *res = (void *) surf->base.texture;

      enum isl_aux_usage aux_usage =
         iris_resource_render_aux_usage(ice, res, surf->view.format,
                                        ice->state.blend_enables & (1u << i),
                                        draw_aux_buffer_disabled[i]);

      // XXX: NEW_AUX_STATE
      ice->state.draw_aux_usage[i] = aux_usage;

      iris_resource_prepare_render(ice, batch, res, surf->view.base_level,
                                   surf->view.base_array_layer,
                                   surf->view.array_len,
                                   aux_usage);

      iris_cache_flush_for_render(batch, res->bo, surf->view.format,
                                  aux_usage);
   }
}

/**
 * \brief Call this after drawing to mark which buffers need resolving.
 *
 * If the depth buffer was written to and if it has an accompanying HiZ
 * buffer, then mark that it needs a depth resolve.
 *
 * If the color buffer is a multisample window system buffer, then
 * mark that it needs a downsample.
 *
 * Also mark any render targets which will be textured as needing a render
 * cache flush.
 */
void
iris_postdraw_update_resolve_tracking(struct iris_context *ice,
                                      struct iris_batch *batch)
{
   struct pipe_framebuffer_state *cso_fb = &ice->state.framebuffer;
   struct pipe_surface *zs_surf = cso_fb->zsbuf;

   // XXX: front buffer drawing?

   if (zs_surf) {
      struct iris_resource *z_res, *s_res;
      iris_get_depth_stencil_resources(zs_surf->texture, &z_res, &s_res);
      unsigned num_layers =
         zs_surf->u.tex.last_layer - zs_surf->u.tex.first_layer + 1;

      if (z_res) {
         iris_resource_finish_depth(ice, z_res, zs_surf->u.tex.level,
                                    zs_surf->u.tex.first_layer, num_layers,
                                    ice->state.depth_writes_enabled);

         if (ice->state.depth_writes_enabled)
            iris_depth_cache_add_bo(batch, z_res->bo);
      }

      if (s_res) {
         iris_resource_finish_write(ice, s_res, zs_surf->u.tex.level,
                                    zs_surf->u.tex.first_layer, num_layers,
                                    ISL_AUX_USAGE_NONE);

         if (ice->state.stencil_writes_enabled)
            iris_depth_cache_add_bo(batch, s_res->bo);
      }
   }

   for (unsigned i = 0; i < cso_fb->nr_cbufs; i++) {
      struct iris_surface *surf = (void *) cso_fb->cbufs[i];
      if (!surf)
         continue;

      struct iris_resource *res = (void *) surf->base.texture;
      union pipe_surface_desc *desc = &surf->base.u;
      unsigned num_layers = desc->tex.last_layer - desc->tex.first_layer + 1;
      enum isl_aux_usage aux_usage = ice->state.draw_aux_usage[i];

      iris_render_cache_add_bo(batch, res->bo, surf->view.format, aux_usage);

      iris_resource_finish_render(ice, res, desc->tex.level,
                                  desc->tex.first_layer, num_layers,
                                  aux_usage);
   }
}

/**
 * Clear the cache-tracking sets.
 */
void
iris_cache_sets_clear(struct iris_batch *batch)
{
   hash_table_foreach(batch->cache.render, render_entry)
      _mesa_hash_table_remove(batch->cache.render, render_entry);

   set_foreach(batch->cache.depth, depth_entry)
      _mesa_set_remove(batch->cache.depth, depth_entry);
}

/**
 * Emits an appropriate flush for a BO if it has been rendered to within the
 * same batchbuffer as a read that's about to be emitted.
 *
 * The GPU has separate, incoherent caches for the render cache and the
 * sampler cache, along with other caches. Usually data in the different
 * caches don't interact (e.g. we don't render to our driver-generated
 * immediate constant data), but for render-to-texture in FBOs we definitely
 * do. When a batchbuffer is flushed, the kernel will ensure that everything
 * necessary is flushed before another use of that BO, but for reuse from
 * different caches within a batchbuffer, it's all our responsibility.
 */
void
iris_flush_depth_and_render_caches(struct iris_batch *batch)
{
   iris_emit_pipe_control_flush(batch,
                                PIPE_CONTROL_DEPTH_CACHE_FLUSH |
                                PIPE_CONTROL_RENDER_TARGET_FLUSH |
                                PIPE_CONTROL_CS_STALL);

   iris_emit_pipe_control_flush(batch,
                                PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
                                PIPE_CONTROL_CONST_CACHE_INVALIDATE);

   iris_cache_sets_clear(batch);
}

void
iris_cache_flush_for_read(struct iris_batch *batch,
                          struct iris_bo *bo)
{
   if (_mesa_hash_table_search_pre_hashed(batch->cache.render, bo->hash, bo) ||
       _mesa_set_search_pre_hashed(batch->cache.depth, bo->hash, bo))
      iris_flush_depth_and_render_caches(batch);
}
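
/* A minimal usage sketch (hypothetical caller, not part of the driver):
 * the render-to-texture pattern the cache sets exist to catch. After a
 * draw writes a BO, it lands in the render cache set; a later read of the
 * same BO within the same batch must flush first.
 */
static UNUSED void
example_render_then_sample(struct iris_batch *batch,
                           struct iris_bo *bo,
                           enum isl_format format,
                           enum isl_aux_usage aux_usage)
{
   /* ...emit a draw that writes 'bo' as a render target, then: */
   iris_render_cache_add_bo(batch, bo, format, aux_usage);

   /* ...later, before emitting a read of 'bo' through the sampler: since
    * 'bo' is now in the render cache set, this flushes and clears the sets.
    */
   iris_cache_flush_for_read(batch, bo);
}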

static void *
format_aux_tuple(enum isl_format format, enum isl_aux_usage aux_usage)
{
   return (void *)(uintptr_t)((uint32_t)format << 8 | aux_usage);
}
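
/* Illustrative inverses of the packing above (hypothetical helpers; the
 * driver itself only ever compares whole tuples): the format lives in the
 * bits above 8, the aux usage in the low 8 bits.
 */
static UNUSED enum isl_format
format_from_aux_tuple(void *data)
{
   return (enum isl_format)((uintptr_t)data >> 8);
}

static UNUSED enum isl_aux_usage
aux_usage_from_aux_tuple(void *data)
{
   return (enum isl_aux_usage)((uintptr_t)data & 0xff);
}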

void
iris_cache_flush_for_render(struct iris_batch *batch,
                            struct iris_bo *bo,
                            enum isl_format format,
                            enum isl_aux_usage aux_usage)
{
   if (_mesa_set_search_pre_hashed(batch->cache.depth, bo->hash, bo))
      iris_flush_depth_and_render_caches(batch);

   /* Check to see if this bo has been used by a previous rendering operation
    * but with a different format or aux usage. If it has, flush the render
    * cache so we ensure that it's only in there with one format or aux usage
    * at a time.
    *
    * Even though it's not obvious, this can easily happen in practice.
    * Suppose a client is blending on a surface with sRGB encode enabled on
    * gen9. This implies that you get AUX_USAGE_CCS_D at best. If the client
    * then disables sRGB decode and continues blending we will flip on
    * AUX_USAGE_CCS_E without doing any sort of resolve in-between (this is
    * perfectly valid since CCS_E is a subset of CCS_D). However, this means
    * that we have fragments in-flight which are rendering with UNORM+CCS_E
    * and other fragments in-flight with SRGB+CCS_D on the same surface at the
    * same time and the pixel scoreboard and color blender are trying to sort
    * it all out. This ends badly (i.e. GPU hangs).
    *
    * To date, we have never observed GPU hangs or even corruption to be
    * associated with switching the format, only the aux usage. However,
    * there are comments in various docs which indicate that the render cache
    * isn't 100% resilient to format changes. We may as well be conservative
    * and flush on format changes too. We can always relax this later if we
    * find it to be a performance problem.
    */
   struct hash_entry *entry =
      _mesa_hash_table_search_pre_hashed(batch->cache.render, bo->hash, bo);
   if (entry && entry->data != format_aux_tuple(format, aux_usage))
      iris_flush_depth_and_render_caches(batch);
}
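
/* A sketch of the sRGB-blending scenario described above (hypothetical
 * format names; the calls mirror what the state upload code would do):
 *
 *    // Blend pass with sRGB encode enabled -> CCS_D at best:
 *    iris_cache_flush_for_render(batch, bo, SRGB_FMT, ISL_AUX_USAGE_CCS_D);
 *    iris_render_cache_add_bo(batch, bo, SRGB_FMT, ISL_AUX_USAGE_CCS_D);
 *
 *    // Client disables sRGB decode; same BO, now UNORM+CCS_E. The tuple
 *    // differs from the tracked one, so this flushes before more blending:
 *    iris_cache_flush_for_render(batch, bo, UNORM_FMT, ISL_AUX_USAGE_CCS_E);
 */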

void
iris_render_cache_add_bo(struct iris_batch *batch,
                         struct iris_bo *bo,
                         enum isl_format format,
                         enum isl_aux_usage aux_usage)
{
#ifndef NDEBUG
   struct hash_entry *entry =
      _mesa_hash_table_search_pre_hashed(batch->cache.render, bo->hash, bo);
   if (entry) {
      /* Otherwise, someone didn't do a flush_for_render and that would be
       * very bad indeed.
       */
      assert(entry->data == format_aux_tuple(format, aux_usage));
   }
#endif

   _mesa_hash_table_insert_pre_hashed(batch->cache.render, bo->hash, bo,
                                      format_aux_tuple(format, aux_usage));
}

void
iris_cache_flush_for_depth(struct iris_batch *batch,
                           struct iris_bo *bo)
{
   if (_mesa_hash_table_search_pre_hashed(batch->cache.render, bo->hash, bo))
      iris_flush_depth_and_render_caches(batch);
}

void
iris_depth_cache_add_bo(struct iris_batch *batch, struct iris_bo *bo)
{
   _mesa_set_add_pre_hashed(batch->cache.depth, bo->hash, bo);
}

static void
iris_resolve_color(struct iris_context *ice,
                   struct iris_batch *batch,
                   struct iris_resource *res,
                   unsigned level, unsigned layer,
                   enum isl_aux_op resolve_op)
{
   //DBG("%s to mt %p level %u layer %u\n", __FUNCTION__, mt, level, layer);

   struct blorp_surf surf;
   iris_blorp_surf_for_resource(&surf, &res->base, res->aux.usage, level,
                                true);

   iris_batch_maybe_flush(batch, 1500);

   /* Ivybridge PRM Vol 2, Part 1, "11.7 MCS Buffer for Render Target(s)":
    *
    *    "Any transition from any value in {Clear, Render, Resolve} to a
    *    different value in {Clear, Render, Resolve} requires end of pipe
    *    synchronization."
    *
    * In other words, fast clear ops are not properly synchronized with
    * other drawing. We need to use a PIPE_CONTROL to ensure that the
    * contents of the previous draw hit the render target before we resolve
    * and again afterwards to ensure that the resolve is complete before we
    * do any more regular drawing.
    */
   iris_emit_end_of_pipe_sync(batch, PIPE_CONTROL_RENDER_TARGET_FLUSH);

   struct blorp_batch blorp_batch;
   blorp_batch_init(&ice->blorp, &blorp_batch, batch, 0);
   blorp_ccs_resolve(&blorp_batch, &surf, level, layer, 1,
                     isl_format_srgb_to_linear(res->surf.format),
                     resolve_op);
   blorp_batch_finish(&blorp_batch);

   /* See comment above */
   iris_emit_end_of_pipe_sync(batch, PIPE_CONTROL_RENDER_TARGET_FLUSH);
}

static void
iris_mcs_partial_resolve(struct iris_context *ice,
                         struct iris_batch *batch,
                         struct iris_resource *res,
                         uint32_t start_layer,
                         uint32_t num_layers)
{
   //DBG("%s to mt %p layers %u-%u\n", __FUNCTION__, mt,
   //    start_layer, start_layer + num_layers - 1);

   assert(res->aux.usage == ISL_AUX_USAGE_MCS);

   struct blorp_surf surf;
   iris_blorp_surf_for_resource(&surf, &res->base, res->aux.usage, 0, true);

   struct blorp_batch blorp_batch;
   blorp_batch_init(&ice->blorp, &blorp_batch, batch, 0);
   blorp_mcs_partial_resolve(&blorp_batch, &surf, res->surf.format,
                             start_layer, num_layers);
   blorp_batch_finish(&blorp_batch);
}


/**
 * Return true if the format that will be used to access the resource is
 * CCS_E-compatible with the resource's linear/non-sRGB format.
 *
 * Why use the linear format? Well, although the resource may be specified
 * with an sRGB format, the usage of that color space/format can be toggled.
 * Since our HW tends to support more linear formats than sRGB ones, we use
 * this format variant to check for CCS_E compatibility.
 */
static bool
format_ccs_e_compat_with_resource(const struct gen_device_info *devinfo,
                                  const struct iris_resource *res,
                                  enum isl_format access_format)
{
   assert(res->aux.usage == ISL_AUX_USAGE_CCS_E);

   enum isl_format isl_format = isl_format_srgb_to_linear(res->surf.format);
   return isl_formats_are_ccs_e_compatible(devinfo, isl_format, access_format);
}

static bool
sample_with_hiz(const struct gen_device_info *devinfo,
                const struct iris_resource *res)
{
   if (!devinfo->has_sample_with_hiz)
      return false;

   if (res->aux.usage != ISL_AUX_USAGE_HIZ)
      return false;

   /* It seems the hardware won't fall back to the depth buffer if some of
    * the mipmap levels aren't available in the HiZ buffer. So we need all
    * levels of the texture to be HiZ enabled.
    */
   for (unsigned level = 0; level < res->surf.levels; ++level) {
      if (!iris_resource_level_has_hiz(res, level))
         return false;
   }

   /* If compressed multisampling is enabled, then we use it for the auxiliary
    * buffer instead.
    *
    * From the BDW PRM (Volume 2d: Command Reference: Structures
    * RENDER_SURFACE_STATE.AuxiliarySurfaceMode):
    *
    *    "If this field is set to AUX_HIZ, Number of Multisamples must be
    *    MULTISAMPLECOUNT_1, and Surface Type cannot be SURFTYPE_3D."
    *
    * There is no such blurb for 1D textures, but there is sufficient evidence
    * that this is broken on SKL+.
    */
   // XXX: i965 disables this for arrays too, is that reasonable?
   return res->surf.samples == 1 && res->surf.dim == ISL_SURF_DIM_2D;
}

/**
 * Perform a HiZ or depth resolve operation.
 *
 * For an overview of HiZ ops, see the following sections of the Sandy Bridge
 * PRM, Volume 1, Part 2:
 *   - 7.5.3.1 Depth Buffer Clear
 *   - 7.5.3.2 Depth Buffer Resolve
 *   - 7.5.3.3 Hierarchical Depth Buffer Resolve
 */
static void
iris_hiz_exec(struct iris_context *ice,
              struct iris_batch *batch,
              struct iris_resource *res,
              unsigned int level, unsigned int start_layer,
              unsigned int num_layers, enum isl_aux_op op)
{
   assert(iris_resource_level_has_hiz(res, level));
   assert(op != ISL_AUX_OP_NONE);
   const char *name = NULL;

   switch (op) {
   case ISL_AUX_OP_FULL_RESOLVE:
      name = "depth resolve";
      break;
   case ISL_AUX_OP_AMBIGUATE:
      name = "hiz ambiguate";
      break;
   case ISL_AUX_OP_FAST_CLEAR:
      name = "depth clear";
      break;
   case ISL_AUX_OP_PARTIAL_RESOLVE:
   case ISL_AUX_OP_NONE:
      unreachable("Invalid HiZ op");
   }

   //DBG("%s %s to mt %p level %d layers %d-%d\n",
   //    __func__, name, mt, level, start_layer, start_layer + num_layers - 1);

   /* The following stalls and flushes are only documented to be required
    * for HiZ clear operations. However, they also seem to be required for
    * resolve operations.
    *
    * From the Ivybridge PRM, volume 2, "Depth Buffer Clear":
    *
    *    "If other rendering operations have preceded this clear, a
    *    PIPE_CONTROL with depth cache flush enabled, Depth Stall bit
    *    enabled must be issued before the rectangle primitive used for
    *    the depth buffer clear operation."
    *
    * Same applies for Gen8 and Gen9.
    *
    * In addition, from the Ivybridge PRM, volume 2, 1.10.4.1
    * PIPE_CONTROL, Depth Cache Flush Enable:
    *
    *    "This bit must not be set when Depth Stall Enable bit is set in
    *    this packet."
    *
    * This is confirmed to hold for real: Haswell gets immediate GPU hangs.
    *
    * Therefore issue two pipe control flushes, one for cache flush and
    * another for depth stall.
    */
   iris_emit_pipe_control_flush(batch,
                                PIPE_CONTROL_DEPTH_CACHE_FLUSH |
                                PIPE_CONTROL_CS_STALL);

   iris_emit_pipe_control_flush(batch, PIPE_CONTROL_DEPTH_STALL);

   assert(res->aux.usage == ISL_AUX_USAGE_HIZ && res->aux.bo);

   iris_batch_maybe_flush(batch, 1500);

   struct blorp_surf surf;
   iris_blorp_surf_for_resource(&surf, &res->base, ISL_AUX_USAGE_HIZ,
                                level, true);

   struct blorp_batch blorp_batch;
   blorp_batch_init(&ice->blorp, &blorp_batch, batch,
                    BLORP_BATCH_NO_UPDATE_CLEAR_COLOR);
   blorp_hiz_op(&blorp_batch, &surf, level, start_layer, num_layers, op);
   blorp_batch_finish(&blorp_batch);

   /* The following stalls and flushes are only documented to be required
    * for HiZ clear operations. However, they also seem to be required for
    * resolve operations.
    *
    * From the Broadwell PRM, volume 7, "Depth Buffer Clear":
    *
    *    "Depth buffer clear pass using any of the methods (WM_STATE,
    *    3DSTATE_WM or 3DSTATE_WM_HZ_OP) must be followed by a
    *    PIPE_CONTROL command with DEPTH_STALL bit and Depth FLUSH bits
    *    "set" before starting to render. DepthStall and DepthFlush are
    *    not needed between consecutive depth clear passes nor is it
    *    required if the depth clear pass was done with
    *    'full_surf_clear' bit set in the 3DSTATE_WM_HZ_OP."
    *
    * TODO: As the spec says, this could be conditional.
    */
   iris_emit_pipe_control_flush(batch,
                                PIPE_CONTROL_DEPTH_CACHE_FLUSH |
                                PIPE_CONTROL_DEPTH_STALL);
}

/**
 * Does the resource's slice have hiz enabled?
 */
bool
iris_resource_level_has_hiz(const struct iris_resource *res, uint32_t level)
{
   iris_resource_check_level_layer(res, level, 0);
   return res->aux.has_hiz & (1 << level);
}

/** \brief Assert that the level and layer are valid for the resource. */
void
iris_resource_check_level_layer(UNUSED const struct iris_resource *res,
                                UNUSED uint32_t level, UNUSED uint32_t layer)
{
   assert(level < res->surf.levels);
   assert(layer < util_num_layers(&res->base, level));
}

static inline uint32_t
miptree_level_range_length(const struct iris_resource *res,
                           uint32_t start_level, uint32_t num_levels)
{
   assert(start_level < res->surf.levels);

   if (num_levels == INTEL_REMAINING_LEVELS)
      num_levels = res->surf.levels - start_level;

   /* Check for overflow */
   assert(start_level + num_levels >= start_level);
   assert(start_level + num_levels <= res->surf.levels);

   return num_levels;
}

static inline uint32_t
miptree_layer_range_length(const struct iris_resource *res, uint32_t level,
                           uint32_t start_layer, uint32_t num_layers)
{
   assert(level <= res->base.last_level);

   const uint32_t total_num_layers = iris_get_num_logical_layers(res, level);
   assert(start_layer < total_num_layers);
   if (num_layers == INTEL_REMAINING_LAYERS)
      num_layers = total_num_layers - start_layer;
   /* Check for overflow */
   assert(start_layer + num_layers >= start_layer);
   assert(start_layer + num_layers <= total_num_layers);

   return num_layers;
}
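
/* Usage sketch (hypothetical values): passing INTEL_REMAINING_LEVELS or
 * INTEL_REMAINING_LAYERS means "from the start point to the end of the
 * miptree", clamped by the helpers above:
 *
 *    uint32_t levels =
 *       miptree_level_range_length(res, 1, INTEL_REMAINING_LEVELS);
 *    uint32_t layers =
 *       miptree_layer_range_length(res, 1, 0, INTEL_REMAINING_LAYERS);
 */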

static bool
has_color_unresolved(const struct iris_resource *res,
                     unsigned start_level, unsigned num_levels,
                     unsigned start_layer, unsigned num_layers)
{
   if (!res->aux.bo)
      return false;

   /* Clamp the level range to fit the resource */
   num_levels = miptree_level_range_length(res, start_level, num_levels);

   for (uint32_t l = 0; l < num_levels; l++) {
      const uint32_t level = start_level + l;
      const uint32_t level_layers =
         miptree_layer_range_length(res, level, start_layer, num_layers);
      for (unsigned a = 0; a < level_layers; a++) {
         enum isl_aux_state aux_state =
            iris_resource_get_aux_state(res, level, start_layer + a);
         assert(aux_state != ISL_AUX_STATE_AUX_INVALID);
         if (aux_state != ISL_AUX_STATE_PASS_THROUGH)
            return true;
      }
   }

   return false;
}

static enum isl_aux_op
get_ccs_d_resolve_op(enum isl_aux_state aux_state,
                     enum isl_aux_usage aux_usage,
                     bool fast_clear_supported)
{
   assert(aux_usage == ISL_AUX_USAGE_NONE || aux_usage == ISL_AUX_USAGE_CCS_D);

   const bool ccs_supported = aux_usage == ISL_AUX_USAGE_CCS_D;

   assert(ccs_supported == fast_clear_supported);

   switch (aux_state) {
   case ISL_AUX_STATE_CLEAR:
   case ISL_AUX_STATE_PARTIAL_CLEAR:
      if (!ccs_supported)
         return ISL_AUX_OP_FULL_RESOLVE;
      else
         return ISL_AUX_OP_NONE;

   case ISL_AUX_STATE_PASS_THROUGH:
      return ISL_AUX_OP_NONE;

   case ISL_AUX_STATE_RESOLVED:
   case ISL_AUX_STATE_AUX_INVALID:
   case ISL_AUX_STATE_COMPRESSED_CLEAR:
   case ISL_AUX_STATE_COMPRESSED_NO_CLEAR:
      break;
   }

   unreachable("Invalid aux state for CCS_D");
}

static enum isl_aux_op
get_ccs_e_resolve_op(enum isl_aux_state aux_state,
                     enum isl_aux_usage aux_usage,
                     bool fast_clear_supported)
{
   /* CCS_E surfaces can be accessed as CCS_D if we're careful. */
   assert(aux_usage == ISL_AUX_USAGE_NONE ||
          aux_usage == ISL_AUX_USAGE_CCS_D ||
          aux_usage == ISL_AUX_USAGE_CCS_E);

   if (aux_usage == ISL_AUX_USAGE_CCS_D)
      assert(fast_clear_supported);

   switch (aux_state) {
   case ISL_AUX_STATE_CLEAR:
   case ISL_AUX_STATE_PARTIAL_CLEAR:
      if (fast_clear_supported)
         return ISL_AUX_OP_NONE;
      else if (aux_usage == ISL_AUX_USAGE_CCS_E)
         return ISL_AUX_OP_PARTIAL_RESOLVE;
      else
         return ISL_AUX_OP_FULL_RESOLVE;

   case ISL_AUX_STATE_COMPRESSED_CLEAR:
      if (aux_usage != ISL_AUX_USAGE_CCS_E)
         return ISL_AUX_OP_FULL_RESOLVE;
      else if (!fast_clear_supported)
         return ISL_AUX_OP_PARTIAL_RESOLVE;
      else
         return ISL_AUX_OP_NONE;

   case ISL_AUX_STATE_COMPRESSED_NO_CLEAR:
      if (aux_usage != ISL_AUX_USAGE_CCS_E)
         return ISL_AUX_OP_FULL_RESOLVE;
      else
         return ISL_AUX_OP_NONE;

   case ISL_AUX_STATE_PASS_THROUGH:
      return ISL_AUX_OP_NONE;

   case ISL_AUX_STATE_RESOLVED:
   case ISL_AUX_STATE_AUX_INVALID:
      break;
   }

   unreachable("Invalid aux state for CCS_E");
}
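
/* Summary of get_ccs_e_resolve_op() above, for reference (derived directly
 * from the switch; CLEAR and PARTIAL_CLEAR shown as one row):
 *
 *    aux_state            fast clear?  access as CCS_E?  -> op
 *    CLEAR/PARTIAL_CLEAR  yes          -                 -> NONE
 *    CLEAR/PARTIAL_CLEAR  no           yes               -> PARTIAL_RESOLVE
 *    CLEAR/PARTIAL_CLEAR  no           no                -> FULL_RESOLVE
 *    COMPRESSED_CLEAR     -            no                -> FULL_RESOLVE
 *    COMPRESSED_CLEAR     no           yes               -> PARTIAL_RESOLVE
 *    COMPRESSED_CLEAR     yes          yes               -> NONE
 *    COMPRESSED_NO_CLEAR  -            no                -> FULL_RESOLVE
 *    COMPRESSED_NO_CLEAR  -            yes               -> NONE
 *    PASS_THROUGH         -            -                 -> NONE
 */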

static void
iris_resource_prepare_ccs_access(struct iris_context *ice,
                                 struct iris_batch *batch,
                                 struct iris_resource *res,
                                 uint32_t level, uint32_t layer,
                                 enum isl_aux_usage aux_usage,
                                 bool fast_clear_supported)
{
   enum isl_aux_state aux_state = iris_resource_get_aux_state(res, level, layer);

   enum isl_aux_op resolve_op;
   if (res->aux.usage == ISL_AUX_USAGE_CCS_E) {
      resolve_op = get_ccs_e_resolve_op(aux_state, aux_usage,
                                        fast_clear_supported);
   } else {
      assert(res->aux.usage == ISL_AUX_USAGE_CCS_D);
      resolve_op = get_ccs_d_resolve_op(aux_state, aux_usage,
                                        fast_clear_supported);
   }

   if (resolve_op != ISL_AUX_OP_NONE) {
      iris_resolve_color(ice, batch, res, level, layer, resolve_op);

      switch (resolve_op) {
      case ISL_AUX_OP_FULL_RESOLVE:
         /* The CCS full resolve operation destroys the CCS and sets it to the
          * pass-through state. (You can also think of this as being both a
          * resolve and an ambiguate in one operation.)
          */
         iris_resource_set_aux_state(res, level, layer, 1,
                                     ISL_AUX_STATE_PASS_THROUGH);
         break;

      case ISL_AUX_OP_PARTIAL_RESOLVE:
         iris_resource_set_aux_state(res, level, layer, 1,
                                     ISL_AUX_STATE_COMPRESSED_NO_CLEAR);
         break;

      default:
         unreachable("Invalid resolve op");
      }
   }
}

static void
iris_resource_finish_ccs_write(struct iris_context *ice,
                               struct iris_resource *res,
                               uint32_t level, uint32_t layer,
                               enum isl_aux_usage aux_usage)
{
   assert(aux_usage == ISL_AUX_USAGE_NONE ||
          aux_usage == ISL_AUX_USAGE_CCS_D ||
          aux_usage == ISL_AUX_USAGE_CCS_E);

   enum isl_aux_state aux_state =
      iris_resource_get_aux_state(res, level, layer);

   if (res->aux.usage == ISL_AUX_USAGE_CCS_E) {
      switch (aux_state) {
      case ISL_AUX_STATE_CLEAR:
      case ISL_AUX_STATE_PARTIAL_CLEAR:
         assert(aux_usage == ISL_AUX_USAGE_CCS_E ||
                aux_usage == ISL_AUX_USAGE_CCS_D);

         if (aux_usage == ISL_AUX_USAGE_CCS_E) {
            iris_resource_set_aux_state(res, level, layer, 1,
                                        ISL_AUX_STATE_COMPRESSED_CLEAR);
         } else if (aux_state != ISL_AUX_STATE_PARTIAL_CLEAR) {
            iris_resource_set_aux_state(res, level, layer, 1,
                                        ISL_AUX_STATE_PARTIAL_CLEAR);
         }
         break;

      case ISL_AUX_STATE_COMPRESSED_CLEAR:
      case ISL_AUX_STATE_COMPRESSED_NO_CLEAR:
         assert(aux_usage == ISL_AUX_USAGE_CCS_E);
         break; /* Nothing to do */

      case ISL_AUX_STATE_PASS_THROUGH:
         if (aux_usage == ISL_AUX_USAGE_CCS_E) {
            iris_resource_set_aux_state(res, level, layer, 1,
                                        ISL_AUX_STATE_COMPRESSED_NO_CLEAR);
         } else {
            /* Nothing to do */
         }
         break;

      case ISL_AUX_STATE_RESOLVED:
      case ISL_AUX_STATE_AUX_INVALID:
         unreachable("Invalid aux state for CCS_E");
      }
   } else {
      assert(res->aux.usage == ISL_AUX_USAGE_CCS_D);
      /* CCS_D is a bit simpler */
      switch (aux_state) {
      case ISL_AUX_STATE_CLEAR:
         assert(aux_usage == ISL_AUX_USAGE_CCS_D);
         iris_resource_set_aux_state(res, level, layer, 1,
                                     ISL_AUX_STATE_PARTIAL_CLEAR);
         break;

      case ISL_AUX_STATE_PARTIAL_CLEAR:
         assert(aux_usage == ISL_AUX_USAGE_CCS_D);
         break; /* Nothing to do */

      case ISL_AUX_STATE_PASS_THROUGH:
         /* Nothing to do */
         break;

      case ISL_AUX_STATE_COMPRESSED_CLEAR:
      case ISL_AUX_STATE_COMPRESSED_NO_CLEAR:
      case ISL_AUX_STATE_RESOLVED:
      case ISL_AUX_STATE_AUX_INVALID:
         unreachable("Invalid aux state for CCS_D");
      }
   }
}

static void
iris_resource_prepare_mcs_access(struct iris_context *ice,
                                 struct iris_batch *batch,
                                 struct iris_resource *res,
                                 uint32_t layer,
                                 enum isl_aux_usage aux_usage,
                                 bool fast_clear_supported)
{
   assert(aux_usage == ISL_AUX_USAGE_MCS);

   switch (iris_resource_get_aux_state(res, 0, layer)) {
   case ISL_AUX_STATE_CLEAR:
   case ISL_AUX_STATE_COMPRESSED_CLEAR:
      if (!fast_clear_supported) {
         iris_mcs_partial_resolve(ice, batch, res, layer, 1);
         iris_resource_set_aux_state(res, 0, layer, 1,
                                     ISL_AUX_STATE_COMPRESSED_NO_CLEAR);
      }
      break;

   case ISL_AUX_STATE_COMPRESSED_NO_CLEAR:
      break; /* Nothing to do */

   case ISL_AUX_STATE_RESOLVED:
   case ISL_AUX_STATE_PASS_THROUGH:
   case ISL_AUX_STATE_AUX_INVALID:
   case ISL_AUX_STATE_PARTIAL_CLEAR:
      unreachable("Invalid aux state for MCS");
   }
}

static void
iris_resource_finish_mcs_write(struct iris_context *ice,
                               struct iris_resource *res,
                               uint32_t layer,
                               enum isl_aux_usage aux_usage)
{
   assert(aux_usage == ISL_AUX_USAGE_MCS);

   switch (iris_resource_get_aux_state(res, 0, layer)) {
   case ISL_AUX_STATE_CLEAR:
      iris_resource_set_aux_state(res, 0, layer, 1,
                                  ISL_AUX_STATE_COMPRESSED_CLEAR);
      break;

   case ISL_AUX_STATE_COMPRESSED_CLEAR:
   case ISL_AUX_STATE_COMPRESSED_NO_CLEAR:
      break; /* Nothing to do */

   case ISL_AUX_STATE_RESOLVED:
   case ISL_AUX_STATE_PASS_THROUGH:
   case ISL_AUX_STATE_AUX_INVALID:
   case ISL_AUX_STATE_PARTIAL_CLEAR:
      unreachable("Invalid aux state for MCS");
   }
}

static void
iris_resource_prepare_hiz_access(struct iris_context *ice,
                                 struct iris_batch *batch,
                                 struct iris_resource *res,
                                 uint32_t level, uint32_t layer,
                                 enum isl_aux_usage aux_usage,
                                 bool fast_clear_supported)
{
   assert(aux_usage == ISL_AUX_USAGE_NONE || aux_usage == ISL_AUX_USAGE_HIZ);

   enum isl_aux_op hiz_op = ISL_AUX_OP_NONE;
   switch (iris_resource_get_aux_state(res, level, layer)) {
   case ISL_AUX_STATE_CLEAR:
   case ISL_AUX_STATE_COMPRESSED_CLEAR:
      if (aux_usage != ISL_AUX_USAGE_HIZ || !fast_clear_supported)
         hiz_op = ISL_AUX_OP_FULL_RESOLVE;
      break;

   case ISL_AUX_STATE_COMPRESSED_NO_CLEAR:
      if (aux_usage != ISL_AUX_USAGE_HIZ)
         hiz_op = ISL_AUX_OP_FULL_RESOLVE;
      break;

   case ISL_AUX_STATE_PASS_THROUGH:
   case ISL_AUX_STATE_RESOLVED:
      break;

   case ISL_AUX_STATE_AUX_INVALID:
      if (aux_usage == ISL_AUX_USAGE_HIZ)
         hiz_op = ISL_AUX_OP_AMBIGUATE;
      break;

   case ISL_AUX_STATE_PARTIAL_CLEAR:
      unreachable("Invalid HiZ state");
   }

   if (hiz_op != ISL_AUX_OP_NONE) {
      iris_hiz_exec(ice, batch, res, level, layer, 1, hiz_op);

      switch (hiz_op) {
      case ISL_AUX_OP_FULL_RESOLVE:
         iris_resource_set_aux_state(res, level, layer, 1,
                                     ISL_AUX_STATE_RESOLVED);
         break;

      case ISL_AUX_OP_AMBIGUATE:
         /* The HiZ resolve operation is actually an ambiguate */
         iris_resource_set_aux_state(res, level, layer, 1,
                                     ISL_AUX_STATE_PASS_THROUGH);
         break;

      default:
         unreachable("Invalid HiZ op");
      }
   }
}

static void
iris_resource_finish_hiz_write(struct iris_context *ice,
                               struct iris_resource *res,
                               uint32_t level, uint32_t layer,
                               enum isl_aux_usage aux_usage)
{
   assert(aux_usage == ISL_AUX_USAGE_NONE || aux_usage == ISL_AUX_USAGE_HIZ);

   switch (iris_resource_get_aux_state(res, level, layer)) {
   case ISL_AUX_STATE_CLEAR:
      assert(aux_usage == ISL_AUX_USAGE_HIZ);
      iris_resource_set_aux_state(res, level, layer, 1,
                                  ISL_AUX_STATE_COMPRESSED_CLEAR);
      break;

   case ISL_AUX_STATE_COMPRESSED_NO_CLEAR:
   case ISL_AUX_STATE_COMPRESSED_CLEAR:
      assert(aux_usage == ISL_AUX_USAGE_HIZ);
      break; /* Nothing to do */

   case ISL_AUX_STATE_RESOLVED:
      if (aux_usage == ISL_AUX_USAGE_HIZ) {
         iris_resource_set_aux_state(res, level, layer, 1,
                                     ISL_AUX_STATE_COMPRESSED_NO_CLEAR);
      } else {
         iris_resource_set_aux_state(res, level, layer, 1,
                                     ISL_AUX_STATE_AUX_INVALID);
      }
      break;

   case ISL_AUX_STATE_PASS_THROUGH:
      if (aux_usage == ISL_AUX_USAGE_HIZ) {
         iris_resource_set_aux_state(res, level, layer, 1,
                                     ISL_AUX_STATE_COMPRESSED_NO_CLEAR);
      }
      break;

   case ISL_AUX_STATE_AUX_INVALID:
      assert(aux_usage != ISL_AUX_USAGE_HIZ);
      break;

   case ISL_AUX_STATE_PARTIAL_CLEAR:
      unreachable("Invalid HiZ state");
   }
}

void
iris_resource_prepare_access(struct iris_context *ice,
                             struct iris_batch *batch,
                             struct iris_resource *res,
                             uint32_t start_level, uint32_t num_levels,
                             uint32_t start_layer, uint32_t num_layers,
                             enum isl_aux_usage aux_usage,
                             bool fast_clear_supported)
{
   num_levels = miptree_level_range_length(res, start_level, num_levels);

   switch (res->aux.usage) {
   case ISL_AUX_USAGE_NONE:
      /* Nothing to do */
      break;

   case ISL_AUX_USAGE_MCS:
      assert(start_level == 0 && num_levels == 1);
      const uint32_t level_layers =
         miptree_layer_range_length(res, 0, start_layer, num_layers);
      for (uint32_t a = 0; a < level_layers; a++) {
         iris_resource_prepare_mcs_access(ice, batch, res, start_layer + a,
                                          aux_usage, fast_clear_supported);
      }
      break;

   case ISL_AUX_USAGE_CCS_D:
   case ISL_AUX_USAGE_CCS_E:
      for (uint32_t l = 0; l < num_levels; l++) {
         const uint32_t level = start_level + l;
         const uint32_t level_layers =
            miptree_layer_range_length(res, level, start_layer, num_layers);
         for (uint32_t a = 0; a < level_layers; a++) {
            iris_resource_prepare_ccs_access(ice, batch, res, level,
                                             start_layer + a,
                                             aux_usage, fast_clear_supported);
         }
      }
      break;

   case ISL_AUX_USAGE_HIZ:
      for (uint32_t l = 0; l < num_levels; l++) {
         const uint32_t level = start_level + l;
         if (!iris_resource_level_has_hiz(res, level))
            continue;

         const uint32_t level_layers =
            miptree_layer_range_length(res, level, start_layer, num_layers);
         for (uint32_t a = 0; a < level_layers; a++) {
            iris_resource_prepare_hiz_access(ice, batch, res, level,
                                             start_layer + a, aux_usage,
                                             fast_clear_supported);
         }
      }
      break;

   default:
      unreachable("Invalid aux usage");
   }
}

void
iris_resource_finish_write(struct iris_context *ice,
                           struct iris_resource *res, uint32_t level,
                           uint32_t start_layer, uint32_t num_layers,
                           enum isl_aux_usage aux_usage)
{
   num_layers = miptree_layer_range_length(res, level, start_layer, num_layers);

   switch (res->aux.usage) {
   case ISL_AUX_USAGE_NONE:
      break;

   case ISL_AUX_USAGE_MCS:
      for (uint32_t a = 0; a < num_layers; a++) {
         iris_resource_finish_mcs_write(ice, res, start_layer + a,
                                        aux_usage);
      }
      break;

   case ISL_AUX_USAGE_CCS_D:
   case ISL_AUX_USAGE_CCS_E:
      for (uint32_t a = 0; a < num_layers; a++) {
         iris_resource_finish_ccs_write(ice, res, level, start_layer + a,
                                        aux_usage);
      }
      break;

   case ISL_AUX_USAGE_HIZ:
      if (!iris_resource_level_has_hiz(res, level))
         return;

      for (uint32_t a = 0; a < num_layers; a++) {
         iris_resource_finish_hiz_write(ice, res, level, start_layer + a,
                                        aux_usage);
      }
      break;

   default:
      unreachable("Invalid aux usage");
   }
}
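
/* A minimal sketch of the prepare/finish bracket (hypothetical caller, not
 * part of the driver): any GPU access to a possibly-compressed slice is
 * preceded by a "prepare" that emits whatever resolves the access needs,
 * and a write is followed by a "finish" that updates the tracked aux state.
 */
static UNUSED void
example_write_one_slice(struct iris_context *ice,
                        struct iris_batch *batch,
                        struct iris_resource *res,
                        uint32_t level, uint32_t layer,
                        enum isl_aux_usage aux_usage)
{
   iris_resource_prepare_access(ice, batch, res, level, 1, layer, 1,
                                aux_usage,
                                aux_usage != ISL_AUX_USAGE_NONE);

   /* ...emit the rendering or blit that writes the slice here... */

   iris_resource_finish_write(ice, res, level, layer, 1, aux_usage);
}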

enum isl_aux_state
iris_resource_get_aux_state(const struct iris_resource *res,
                            uint32_t level, uint32_t layer)
{
   iris_resource_check_level_layer(res, level, layer);

   if (res->surf.usage & ISL_SURF_USAGE_DEPTH_BIT) {
      assert(iris_resource_level_has_hiz(res, level));
   } else if (res->surf.usage & ISL_SURF_USAGE_STENCIL_BIT) {
      unreachable("Cannot get aux state for stencil");
   } else {
      assert(res->surf.samples == 1 ||
             res->surf.msaa_layout == ISL_MSAA_LAYOUT_ARRAY);
   }

   return res->aux.state[level][layer];
}

void
iris_resource_set_aux_state(struct iris_resource *res, uint32_t level,
                            uint32_t start_layer, uint32_t num_layers,
                            enum isl_aux_state aux_state)
{
   num_layers = miptree_layer_range_length(res, level, start_layer, num_layers);

   if (res->surf.usage & ISL_SURF_USAGE_DEPTH_BIT) {
      assert(iris_resource_level_has_hiz(res, level));
   } else if (res->surf.usage & ISL_SURF_USAGE_STENCIL_BIT) {
      unreachable("Cannot set aux state for stencil");
   } else {
      assert(res->surf.samples == 1 ||
             res->surf.msaa_layout == ISL_MSAA_LAYOUT_ARRAY);
   }

   for (unsigned a = 0; a < num_layers; a++) {
      if (res->aux.state[level][start_layer + a] != aux_state) {
         res->aux.state[level][start_layer + a] = aux_state;
         // XXX: dirty works differently
         // brw->ctx.NewDriverState |= BRW_NEW_AUX_STATE;
      }
   }
}

/* On Gen9 color buffers may be compressed by the hardware (lossless
 * compression). There are, however, format restrictions and care needs to be
 * taken that the sampler engine is capable of re-interpreting a buffer with
 * a format different from the one it was originally written with.
 *
 * For example, SRGB formats are not compressible and the sampler engine isn't
 * capable of treating RGBA_UNORM as SRGB_ALPHA. In such a case the underlying
 * color buffer needs to be resolved so that the sampling surface can be
 * sampled as non-compressed (i.e., without the auxiliary MCS buffer being
 * set).
 */
static bool
can_texture_with_ccs(const struct gen_device_info *devinfo,
                     struct pipe_debug_callback *dbg,
                     const struct iris_resource *res,
                     enum isl_format view_format)
{
   if (res->aux.usage != ISL_AUX_USAGE_CCS_E)
      return false;

   if (!format_ccs_e_compat_with_resource(devinfo, res, view_format)) {
      const struct isl_format_layout *res_fmtl =
         isl_format_get_layout(res->surf.format);
      const struct isl_format_layout *view_fmtl =
         isl_format_get_layout(view_format);

      perf_debug(dbg, "Incompatible sampling format (%s) for CCS (%s)\n",
                 view_fmtl->name, res_fmtl->name);

      return false;
   }

   return true;
}

enum isl_aux_usage
iris_resource_texture_aux_usage(struct iris_context *ice,
                                const struct iris_resource *res,
                                enum isl_format view_format,
                                enum gen9_astc5x5_wa_tex_type astc5x5_wa_bits)
{
   struct iris_screen *screen = (void *) ice->ctx.screen;
   struct gen_device_info *devinfo = &screen->devinfo;

   assert(devinfo->gen == 9 || astc5x5_wa_bits == 0);

   /* On gen9, ASTC 5x5 textures cannot live in the sampler cache alongside
    * CCS or HiZ compressed textures. See gen9_apply_astc5x5_wa_flush() for
    * details.
    */
   if ((astc5x5_wa_bits & GEN9_ASTC5X5_WA_TEX_TYPE_ASTC5x5) &&
       res->aux.usage != ISL_AUX_USAGE_MCS)
      return ISL_AUX_USAGE_NONE;

   switch (res->aux.usage) {
   case ISL_AUX_USAGE_HIZ:
      if (sample_with_hiz(devinfo, res))
         return ISL_AUX_USAGE_HIZ;
      break;

   case ISL_AUX_USAGE_MCS:
      return ISL_AUX_USAGE_MCS;

   case ISL_AUX_USAGE_CCS_D:
   case ISL_AUX_USAGE_CCS_E:
      /* If we don't have any unresolved color, report an aux usage of
       * ISL_AUX_USAGE_NONE. This way, texturing won't even look at the
       * aux surface and we can save some bandwidth.
       */
      if (!has_color_unresolved(res, 0, INTEL_REMAINING_LEVELS,
                                0, INTEL_REMAINING_LAYERS))
         return ISL_AUX_USAGE_NONE;

      if (can_texture_with_ccs(devinfo, &ice->dbg, res, view_format))
         return ISL_AUX_USAGE_CCS_E;
      break;

   default:
      break;
   }

   return ISL_AUX_USAGE_NONE;
}

static bool
isl_formats_are_fast_clear_compatible(enum isl_format a, enum isl_format b)
{
   /* On gen8 and earlier, the hardware was only capable of handling 0/1 clear
    * values so sRGB curve application was a no-op for all fast-clearable
    * formats.
    *
    * On gen9+, the hardware supports arbitrary clear values. For sRGB clear
    * values, the hardware interprets the floats, not as what would be
    * returned from the sampler (or written by the shader), but as being
    * between format conversion and sRGB curve application. This means that
    * we can switch between sRGB and UNORM without having to whack the clear
    * color.
    */
   return isl_format_srgb_to_linear(a) == isl_format_srgb_to_linear(b);
}
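
/* Concrete example (a sketch): an sRGB/UNORM pair maps to the same linear
 * format, so swapping views between them keeps the stored clear color valid:
 *
 *    assert(isl_formats_are_fast_clear_compatible(
 *              ISL_FORMAT_B8G8R8A8_UNORM, ISL_FORMAT_B8G8R8A8_UNORM_SRGB));
 */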

void
iris_resource_prepare_texture(struct iris_context *ice,
                              struct iris_batch *batch,
                              struct iris_resource *res,
                              enum isl_format view_format,
                              uint32_t start_level, uint32_t num_levels,
                              uint32_t start_layer, uint32_t num_layers,
                              enum gen9_astc5x5_wa_tex_type astc5x5_wa_bits)
{
   enum isl_aux_usage aux_usage =
      iris_resource_texture_aux_usage(ice, res, view_format, astc5x5_wa_bits);

   bool clear_supported = aux_usage != ISL_AUX_USAGE_NONE;

   /* Clear color is specified as ints or floats and the conversion is done by
    * the sampler. If we have a texture view, we would have to perform the
    * clear color conversion manually. Just disable clear color.
    */
   if (!isl_formats_are_fast_clear_compatible(res->surf.format, view_format))
      clear_supported = false;

   iris_resource_prepare_access(ice, batch, res, start_level, num_levels,
                                start_layer, num_layers,
                                aux_usage, clear_supported);
}

void
iris_resource_prepare_image(struct iris_context *ice,
                            struct iris_batch *batch,
                            struct iris_resource *res)
{
   /* The data port doesn't understand any compression */
   iris_resource_prepare_access(ice, batch, res, 0, INTEL_REMAINING_LEVELS,
                                0, INTEL_REMAINING_LAYERS,
                                ISL_AUX_USAGE_NONE, false);
}

enum isl_aux_usage
iris_resource_render_aux_usage(struct iris_context *ice,
                               struct iris_resource *res,
                               enum isl_format render_format,
                               bool blend_enabled,
                               bool draw_aux_disabled)
{
   struct iris_screen *screen = (void *) ice->ctx.screen;
   struct gen_device_info *devinfo = &screen->devinfo;

   if (draw_aux_disabled)
      return ISL_AUX_USAGE_NONE;

   switch (res->aux.usage) {
   case ISL_AUX_USAGE_MCS:
      return ISL_AUX_USAGE_MCS;

   case ISL_AUX_USAGE_CCS_D:
   case ISL_AUX_USAGE_CCS_E:
      /* Gen9+ hardware technically supports non-0/1 clear colors with sRGB
       * formats. However, there are issues with blending where it doesn't
       * properly apply the sRGB curve to the clear color when blending.
       */
      /* XXX:
      if (devinfo->gen >= 9 && blend_enabled &&
          isl_format_is_srgb(render_format) &&
          !isl_color_value_is_zero_one(res->fast_clear_color, render_format))
         return ISL_AUX_USAGE_NONE;
      */

      if (res->aux.usage == ISL_AUX_USAGE_CCS_E &&
          format_ccs_e_compat_with_resource(devinfo, res, render_format))
         return ISL_AUX_USAGE_CCS_E;

      /* Otherwise, we have to fall back to CCS_D */
      return ISL_AUX_USAGE_CCS_D;

   default:
      return ISL_AUX_USAGE_NONE;
   }
}

void
iris_resource_prepare_render(struct iris_context *ice,
                             struct iris_batch *batch,
                             struct iris_resource *res, uint32_t level,
                             uint32_t start_layer, uint32_t layer_count,
                             enum isl_aux_usage aux_usage)
{
   iris_resource_prepare_access(ice, batch, res, level, 1, start_layer,
                                layer_count, aux_usage,
                                aux_usage != ISL_AUX_USAGE_NONE);
}

void
iris_resource_finish_render(struct iris_context *ice,
                            struct iris_resource *res, uint32_t level,
                            uint32_t start_layer, uint32_t layer_count,
                            enum isl_aux_usage aux_usage)
{
   iris_resource_finish_write(ice, res, level, start_layer, layer_count,
                              aux_usage);
}

void
iris_resource_prepare_depth(struct iris_context *ice,
                            struct iris_batch *batch,
                            struct iris_resource *res, uint32_t level,
                            uint32_t start_layer, uint32_t layer_count)
{
   iris_resource_prepare_access(ice, batch, res, level, 1, start_layer,
                                layer_count, res->aux.usage, !!res->aux.bo);
}

void
iris_resource_finish_depth(struct iris_context *ice,
                           struct iris_resource *res, uint32_t level,
                           uint32_t start_layer, uint32_t layer_count,
                           bool depth_written)
{
   if (depth_written) {
      iris_resource_finish_write(ice, res, level, start_layer, layer_count,
                                 res->aux.usage);
   }
}