#include "util/hash_table.h"
#include "util/set.h"
#include "iris_context.h"
+#include "compiler/nir/nir.h"
/**
* Disable auxiliary buffers if a renderbuffer is also bound as a texture
resolve_sampler_views(struct iris_context *ice,
struct iris_batch *batch,
struct iris_shader_state *shs,
+ const struct shader_info *info,
bool *draw_aux_buffer_disabled,
bool consider_framebuffer)
{
- uint32_t views = shs->bound_sampler_views;
-
- unsigned astc5x5_wa_bits = 0; // XXX: actual tracking
+ uint32_t views = info ? (shs->bound_sampler_views & info->textures_used) : 0;
while (views) {
const int i = u_bit_scan(&views);
struct iris_sampler_view *isv = shs->textures[i];
struct iris_resource *res = (void *) isv->base.texture;
- if (res->base.target == PIPE_BUFFER)
- continue;
+ if (res->base.target != PIPE_BUFFER) {
+ if (consider_framebuffer) {
+ disable_rb_aux_buffer(ice, draw_aux_buffer_disabled,
+ res, isv->view.base_level, isv->view.levels,
+ "for sampling");
+ }
- if (consider_framebuffer) {
- disable_rb_aux_buffer(ice, draw_aux_buffer_disabled,
- res, isv->view.base_level, isv->view.levels,
- "for sampling");
+ iris_resource_prepare_texture(ice, batch, res, isv->view.format,
+ isv->view.base_level, isv->view.levels,
+ isv->view.base_array_layer,
+ isv->view.array_len);
}
- iris_resource_prepare_texture(ice, batch, res, isv->view.format,
- isv->view.base_level, isv->view.levels,
- isv->view.base_array_layer,
- isv->view.array_len,
- astc5x5_wa_bits);
-
iris_cache_flush_for_read(batch, res->bo);
}
}
resolve_image_views(struct iris_context *ice,
struct iris_batch *batch,
struct iris_shader_state *shs,
+ const struct shader_info *info,
bool *draw_aux_buffer_disabled,
bool consider_framebuffer)
{
- uint32_t views = shs->bound_image_views;
+ uint32_t views = info ? (shs->bound_image_views & info->images_used) : 0;
while (views) {
const int i = u_bit_scan(&views);
- struct iris_resource *res = (void *) shs->image[i].res;
+ struct pipe_image_view *pview = &shs->image[i].base;
+ struct iris_resource *res = (void *) pview->resource;
+
+ if (res->base.target != PIPE_BUFFER) {
+ if (consider_framebuffer) {
+ disable_rb_aux_buffer(ice, draw_aux_buffer_disabled,
+ res, pview->u.tex.level, 1,
+ "as a shader image");
+ }
- if (res->base.target == PIPE_BUFFER)
- continue;
+ unsigned num_layers =
+ pview->u.tex.last_layer - pview->u.tex.first_layer + 1;
- if (consider_framebuffer) {
- disable_rb_aux_buffer(ice, draw_aux_buffer_disabled,
- res, 0, ~0, "as a shader image");
+ /* The data port doesn't understand any compression */
+ iris_resource_prepare_access(ice, batch, res,
+ pview->u.tex.level, 1,
+ pview->u.tex.first_layer, num_layers,
+ ISL_AUX_USAGE_NONE, false);
}
- iris_resource_prepare_image(ice, batch, res);
-
iris_cache_flush_for_read(batch, res->bo);
}
}
void
iris_predraw_resolve_inputs(struct iris_context *ice,
struct iris_batch *batch,
- struct iris_shader_state *shs,
bool *draw_aux_buffer_disabled,
+ gl_shader_stage stage,
bool consider_framebuffer)
{
- resolve_sampler_views(ice, batch, shs, draw_aux_buffer_disabled, consider_framebuffer);
- resolve_image_views(ice, batch, shs, draw_aux_buffer_disabled, consider_framebuffer);
+ struct iris_shader_state *shs = &ice->state.shaders[stage];
+ const struct shader_info *info = iris_get_shader_info(ice, stage);
+
+ uint64_t dirty = (IRIS_DIRTY_BINDINGS_VS << stage) |
+ (consider_framebuffer ? IRIS_DIRTY_BINDINGS_FS : 0);
- // XXX: ASTC hacks
+ if (ice->state.dirty & dirty) {
+ resolve_sampler_views(ice, batch, shs, info, draw_aux_buffer_disabled,
+ consider_framebuffer);
+ resolve_image_views(ice, batch, shs, info, draw_aux_buffer_disabled,
+ consider_framebuffer);
+ }
}
void
bool *draw_aux_buffer_disabled)
{
struct pipe_framebuffer_state *cso_fb = &ice->state.framebuffer;
- struct pipe_surface *zs_surf = cso_fb->zsbuf;
+ struct iris_screen *screen = (void *) ice->ctx.screen;
+ struct gen_device_info *devinfo = &screen->devinfo;
+ struct iris_uncompiled_shader *ish =
+ ice->shaders.uncompiled[MESA_SHADER_FRAGMENT];
+ const nir_shader *nir = ish->nir;
+
+ if (ice->state.dirty & IRIS_DIRTY_DEPTH_BUFFER) {
+ struct pipe_surface *zs_surf = cso_fb->zsbuf;
+
+ if (zs_surf) {
+ struct iris_resource *z_res, *s_res;
+ iris_get_depth_stencil_resources(zs_surf->texture, &z_res, &s_res);
+ unsigned num_layers =
+ zs_surf->u.tex.last_layer - zs_surf->u.tex.first_layer + 1;
+
+ if (z_res) {
+ iris_resource_prepare_depth(ice, batch, z_res,
+ zs_surf->u.tex.level,
+ zs_surf->u.tex.first_layer,
+ num_layers);
+ iris_cache_flush_for_depth(batch, z_res->bo);
+ }
- if (zs_surf) {
- struct iris_resource *z_res, *s_res;
- iris_get_depth_stencil_resources(zs_surf->texture, &z_res, &s_res);
- unsigned num_layers =
- zs_surf->u.tex.last_layer - zs_surf->u.tex.first_layer + 1;
+ if (s_res) {
+ iris_cache_flush_for_depth(batch, s_res->bo);
+ }
+ }
+ }
- if (z_res) {
- iris_resource_prepare_depth(ice, batch, z_res, zs_surf->u.tex.level,
- zs_surf->u.tex.first_layer, num_layers);
+ if (devinfo->gen == 8 && nir->info.outputs_read != 0) {
+ for (unsigned i = 0; i < cso_fb->nr_cbufs; i++) {
+ if (cso_fb->cbufs[i]) {
+ struct iris_surface *surf = (void *) cso_fb->cbufs[i];
+ struct iris_resource *res = (void *) cso_fb->cbufs[i]->texture;
+
+ iris_resource_prepare_texture(ice, batch, res, surf->view.format,
+ surf->view.base_level, 1,
+ surf->view.base_array_layer,
+ surf->view.array_len);
+ }
}
}
- for (unsigned i = 0; i < cso_fb->nr_cbufs; i++) {
- struct iris_surface *surf = (void *) cso_fb->cbufs[i];
- if (!surf)
- continue;
+ if (ice->state.dirty & (IRIS_DIRTY_BINDINGS_FS | IRIS_DIRTY_BLEND_STATE)) {
+ for (unsigned i = 0; i < cso_fb->nr_cbufs; i++) {
+ struct iris_surface *surf = (void *) cso_fb->cbufs[i];
+ if (!surf)
+ continue;
- struct iris_resource *res = (void *) surf->base.texture;
+ struct iris_resource *res = (void *) surf->base.texture;
- enum isl_aux_usage aux_usage =
- iris_resource_render_aux_usage(ice, res, surf->view.format,
- ice->state.blend_enables & (1u << i),
- draw_aux_buffer_disabled[i]);
+ enum isl_aux_usage aux_usage =
+ iris_resource_render_aux_usage(ice, res, surf->view.format,
+ ice->state.blend_enables & (1u << i),
+ draw_aux_buffer_disabled[i]);
- // XXX: NEW_AUX_STATE
- ice->state.draw_aux_usage[i] = aux_usage;
+ if (ice->state.draw_aux_usage[i] != aux_usage) {
+ ice->state.draw_aux_usage[i] = aux_usage;
+ /* XXX: Need to track which bindings to make dirty */
+ ice->state.dirty |= IRIS_ALL_DIRTY_BINDINGS;
+ }
- iris_resource_prepare_render(ice, batch, res, surf->view.base_level,
- surf->view.base_array_layer,
- surf->view.array_len,
- aux_usage);
+ iris_resource_prepare_render(ice, batch, res, surf->view.base_level,
+ surf->view.base_array_layer,
+ surf->view.array_len,
+ aux_usage);
- iris_cache_flush_for_render(batch, res->bo, surf->view.format,
- ISL_AUX_USAGE_NONE);
+ iris_cache_flush_for_render(batch, res->bo, surf->view.format,
+ aux_usage);
+ }
}
}
struct iris_batch *batch)
{
struct pipe_framebuffer_state *cso_fb = &ice->state.framebuffer;
- struct pipe_surface *zs_surf = cso_fb->zsbuf;
// XXX: front buffer drawing?
+ bool may_have_resolved_depth =
+ ice->state.dirty & (IRIS_DIRTY_DEPTH_BUFFER |
+ IRIS_DIRTY_WM_DEPTH_STENCIL);
+
+ struct pipe_surface *zs_surf = cso_fb->zsbuf;
if (zs_surf) {
struct iris_resource *z_res, *s_res;
iris_get_depth_stencil_resources(zs_surf->texture, &z_res, &s_res);
zs_surf->u.tex.last_layer - zs_surf->u.tex.first_layer + 1;
if (z_res) {
- iris_resource_finish_depth(ice, z_res, zs_surf->u.tex.level,
- zs_surf->u.tex.first_layer, num_layers,
- ice->state.depth_writes_enabled);
+ if (may_have_resolved_depth) {
+ iris_resource_finish_depth(ice, z_res, zs_surf->u.tex.level,
+ zs_surf->u.tex.first_layer, num_layers,
+ ice->state.depth_writes_enabled);
+ }
if (ice->state.depth_writes_enabled)
iris_depth_cache_add_bo(batch, z_res->bo);
}
if (s_res) {
- iris_resource_finish_write(ice, s_res, zs_surf->u.tex.level,
- zs_surf->u.tex.first_layer, num_layers,
- ISL_AUX_USAGE_NONE);
+ if (may_have_resolved_depth && ice->state.stencil_writes_enabled) {
+ iris_resource_finish_write(ice, s_res, zs_surf->u.tex.level,
+ zs_surf->u.tex.first_layer, num_layers,
+ s_res->aux.usage);
+ }
if (ice->state.stencil_writes_enabled)
iris_depth_cache_add_bo(batch, s_res->bo);
}
}
+ bool may_have_resolved_color =
+ ice->state.dirty & (IRIS_DIRTY_BINDINGS_FS | IRIS_DIRTY_BLEND_STATE);
+
for (unsigned i = 0; i < cso_fb->nr_cbufs; i++) {
struct iris_surface *surf = (void *) cso_fb->cbufs[i];
if (!surf)
continue;
struct iris_resource *res = (void *) surf->base.texture;
- union pipe_surface_desc *desc = &surf->base.u;
- unsigned num_layers = desc->tex.last_layer - desc->tex.first_layer + 1;
enum isl_aux_usage aux_usage = ice->state.draw_aux_usage[i];
- iris_render_cache_add_bo(batch, res->bo, surf->view.format, aux_usage);
+ iris_render_cache_add_bo(batch, res->bo, surf->view.format,
+ aux_usage);
- iris_resource_finish_render(ice, res, desc->tex.level,
- desc->tex.first_layer, num_layers,
- aux_usage);
+ if (may_have_resolved_color) {
+ union pipe_surface_desc *desc = &surf->base.u;
+ unsigned num_layers =
+ desc->tex.last_layer - desc->tex.first_layer + 1;
+ iris_resource_finish_render(ice, res, desc->tex.level,
+ desc->tex.first_layer, num_layers,
+ aux_usage);
+ }
}
}
iris_flush_depth_and_render_caches(struct iris_batch *batch)
{
iris_emit_pipe_control_flush(batch,
+ "cache tracker: render-to-texture",
PIPE_CONTROL_DEPTH_CACHE_FLUSH |
PIPE_CONTROL_RENDER_TARGET_FLUSH |
PIPE_CONTROL_CS_STALL);
iris_emit_pipe_control_flush(batch,
+ "cache tracker: render-to-texture",
PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
PIPE_CONTROL_CONST_CACHE_INVALIDATE);
//DBG("%s to mt %p level %u layer %u\n", __FUNCTION__, mt, level, layer);
struct blorp_surf surf;
- iris_blorp_surf_for_resource(&surf, &res->base, res->aux.usage, level,
- true);
+ iris_blorp_surf_for_resource(&ice->vtbl, &batch->screen->isl_dev, &surf,
+ &res->base, res->aux.usage, level, true);
iris_batch_maybe_flush(batch, 1500);
* and again afterwards to ensure that the resolve is complete before we
* do any more regular drawing.
*/
- iris_emit_end_of_pipe_sync(batch, PIPE_CONTROL_RENDER_TARGET_FLUSH);
+ iris_emit_end_of_pipe_sync(batch, "color resolve: pre-flush",
+ PIPE_CONTROL_RENDER_TARGET_FLUSH);
struct blorp_batch blorp_batch;
blorp_batch_init(&ice->blorp, &blorp_batch, batch, 0);
- blorp_ccs_resolve(&blorp_batch, &surf, level, layer, 1,
- isl_format_srgb_to_linear(res->surf.format),
- resolve_op);
+ /* On Gen >= 12, Stencil buffer with lossless compression needs to be
+ * resolved with the WM_HZ_OP packet.
+ */
+ if (res->aux.usage == ISL_AUX_USAGE_STC_CCS) {
+ blorp_hiz_stencil_op(&blorp_batch, &surf, level, layer,
+ 1, resolve_op);
+ } else {
+ blorp_ccs_resolve(&blorp_batch, &surf, level, layer, 1,
+ isl_format_srgb_to_linear(res->surf.format),
+ resolve_op);
+ }
blorp_batch_finish(&blorp_batch);
/* See comment above */
- iris_emit_end_of_pipe_sync(batch, PIPE_CONTROL_RENDER_TARGET_FLUSH);
+ iris_emit_end_of_pipe_sync(batch, "color resolve: post-flush",
+ PIPE_CONTROL_RENDER_TARGET_FLUSH);
}
static void
//DBG("%s to mt %p layers %u-%u\n", __FUNCTION__, mt,
//start_layer, start_layer + num_layers - 1);
- assert(res->aux.usage == ISL_AUX_USAGE_MCS);
+ assert(isl_aux_usage_has_mcs(res->aux.usage));
struct blorp_surf surf;
- iris_blorp_surf_for_resource(&surf, &res->base, res->aux.usage, 0, true);
+ iris_blorp_surf_for_resource(&ice->vtbl, &batch->screen->isl_dev, &surf,
+ &res->base, res->aux.usage, 0, true);
struct blorp_batch blorp_batch;
blorp_batch_init(&ice->blorp, &blorp_batch, batch, 0);
- blorp_mcs_partial_resolve(&blorp_batch, &surf, res->surf.format,
+ blorp_mcs_partial_resolve(&blorp_batch, &surf,
+ isl_format_srgb_to_linear(res->surf.format),
start_layer, num_layers);
blorp_batch_finish(&blorp_batch);
}
return isl_formats_are_ccs_e_compatible(devinfo, isl_format, access_format);
}
-static bool
-sample_with_hiz(const struct gen_device_info *devinfo,
- const struct iris_resource *res)
+bool
+iris_sample_with_depth_aux(const struct gen_device_info *devinfo,
+ const struct iris_resource *res)
{
- if (!devinfo->has_sample_with_hiz)
+ switch (res->aux.usage) {
+ case ISL_AUX_USAGE_HIZ:
+ if (devinfo->has_sample_with_hiz)
+ break;
return false;
-
- if (res->aux.usage != ISL_AUX_USAGE_HIZ)
+ case ISL_AUX_USAGE_HIZ_CCS:
return false;
+ case ISL_AUX_USAGE_HIZ_CCS_WT:
+ break;
+ default:
+ return false;
+ }
/* It seems the hardware won't fallback to the depth buffer if some of the
* mipmap levels aren't available in the HiZ buffer. So we need all levels
* - 7.5.3.2 Depth Buffer Resolve
* - 7.5.3.3 Hierarchical Depth Buffer Resolve
*/
-static void
+void
iris_hiz_exec(struct iris_context *ice,
struct iris_batch *batch,
struct iris_resource *res,
unsigned int level, unsigned int start_layer,
- unsigned int num_layers, enum isl_aux_op op)
+ unsigned int num_layers, enum isl_aux_op op,
+ bool update_clear_depth)
{
assert(iris_resource_level_has_hiz(res, level));
assert(op != ISL_AUX_OP_NONE);
- const char *name = NULL;
+ UNUSED const char *name = NULL;
switch (op) {
case ISL_AUX_OP_FULL_RESOLVE:
* another for depth stall.
*/
iris_emit_pipe_control_flush(batch,
+ "hiz op: pre-flushes (1/2)",
PIPE_CONTROL_DEPTH_CACHE_FLUSH |
PIPE_CONTROL_CS_STALL);
- iris_emit_pipe_control_flush(batch, PIPE_CONTROL_DEPTH_STALL);
+ iris_emit_pipe_control_flush(batch, "hiz op: pre-flushes (2/2)",
+ PIPE_CONTROL_DEPTH_STALL);
- assert(res->aux.usage == ISL_AUX_USAGE_HIZ && res->aux.bo);
+ assert(isl_aux_usage_has_hiz(res->aux.usage) && res->aux.bo);
+
+ iris_batch_maybe_flush(batch, 1500);
struct blorp_surf surf;
- iris_blorp_surf_for_resource(&surf, &res->base, ISL_AUX_USAGE_HIZ,
- level, true);
+ iris_blorp_surf_for_resource(&ice->vtbl, &batch->screen->isl_dev, &surf,
+ &res->base, res->aux.usage, level, true);
struct blorp_batch blorp_batch;
- blorp_batch_init(&ice->blorp, &blorp_batch, batch,
- BLORP_BATCH_NO_UPDATE_CLEAR_COLOR);
+ enum blorp_batch_flags flags = 0;
+ flags |= update_clear_depth ? 0 : BLORP_BATCH_NO_UPDATE_CLEAR_COLOR;
+ blorp_batch_init(&ice->blorp, &blorp_batch, batch, flags);
blorp_hiz_op(&blorp_batch, &surf, level, start_layer, num_layers, op);
blorp_batch_finish(&blorp_batch);
* TODO: Such as the spec says, this could be conditional.
*/
iris_emit_pipe_control_flush(batch,
+ "hiz op: post flush",
PIPE_CONTROL_DEPTH_CACHE_FLUSH |
PIPE_CONTROL_DEPTH_STALL);
}
+static bool
+level_has_aux(const struct iris_resource *res, uint32_t level)
+{
+ return isl_aux_usage_has_hiz(res->aux.usage) ?
+ iris_resource_level_has_hiz(res, level) :
+ res->aux.usage != ISL_AUX_USAGE_NONE;
+}
+
/**
* Does the resource's slice have hiz enabled?
*/
return num_layers;
}
-static bool
-has_color_unresolved(const struct iris_resource *res,
- unsigned start_level, unsigned num_levels,
- unsigned start_layer, unsigned num_layers)
+bool
+iris_has_color_unresolved(const struct iris_resource *res,
+ unsigned start_level, unsigned num_levels,
+ unsigned start_layer, unsigned num_layers)
{
if (!res->aux.bo)
return false;
return false;
}
-static enum isl_aux_op
-get_ccs_d_resolve_op(enum isl_aux_state aux_state,
- enum isl_aux_usage aux_usage,
- bool fast_clear_supported)
-{
- assert(aux_usage == ISL_AUX_USAGE_NONE || aux_usage == ISL_AUX_USAGE_CCS_D);
-
- const bool ccs_supported = aux_usage == ISL_AUX_USAGE_CCS_D;
-
- assert(ccs_supported == fast_clear_supported);
-
- switch (aux_state) {
- case ISL_AUX_STATE_CLEAR:
- case ISL_AUX_STATE_PARTIAL_CLEAR:
- if (!ccs_supported)
- return ISL_AUX_OP_FULL_RESOLVE;
- else
- return ISL_AUX_OP_NONE;
-
- case ISL_AUX_STATE_PASS_THROUGH:
- return ISL_AUX_OP_NONE;
-
- case ISL_AUX_STATE_RESOLVED:
- case ISL_AUX_STATE_AUX_INVALID:
- case ISL_AUX_STATE_COMPRESSED_CLEAR:
- case ISL_AUX_STATE_COMPRESSED_NO_CLEAR:
- break;
- }
-
- unreachable("Invalid aux state for CCS_D");
-}
-
-static enum isl_aux_op
-get_ccs_e_resolve_op(enum isl_aux_state aux_state,
- enum isl_aux_usage aux_usage,
- bool fast_clear_supported)
-{
- /* CCS_E surfaces can be accessed as CCS_D if we're careful. */
- assert(aux_usage == ISL_AUX_USAGE_NONE ||
- aux_usage == ISL_AUX_USAGE_CCS_D ||
- aux_usage == ISL_AUX_USAGE_CCS_E);
-
- if (aux_usage == ISL_AUX_USAGE_CCS_D)
- assert(fast_clear_supported);
-
- switch (aux_state) {
- case ISL_AUX_STATE_CLEAR:
- case ISL_AUX_STATE_PARTIAL_CLEAR:
- if (fast_clear_supported)
- return ISL_AUX_OP_NONE;
- else if (aux_usage == ISL_AUX_USAGE_CCS_E)
- return ISL_AUX_OP_PARTIAL_RESOLVE;
- else
- return ISL_AUX_OP_FULL_RESOLVE;
-
- case ISL_AUX_STATE_COMPRESSED_CLEAR:
- if (aux_usage != ISL_AUX_USAGE_CCS_E)
- return ISL_AUX_OP_FULL_RESOLVE;
- else if (!fast_clear_supported)
- return ISL_AUX_OP_PARTIAL_RESOLVE;
- else
- return ISL_AUX_OP_NONE;
-
- case ISL_AUX_STATE_COMPRESSED_NO_CLEAR:
- if (aux_usage != ISL_AUX_USAGE_CCS_E)
- return ISL_AUX_OP_FULL_RESOLVE;
- else
- return ISL_AUX_OP_NONE;
-
- case ISL_AUX_STATE_PASS_THROUGH:
- return ISL_AUX_OP_NONE;
-
- case ISL_AUX_STATE_RESOLVED:
- case ISL_AUX_STATE_AUX_INVALID:
- break;
- }
-
- unreachable("Invalid aux state for CCS_E");
-}
-
-static void
-iris_resource_prepare_ccs_access(struct iris_context *ice,
- struct iris_batch *batch,
- struct iris_resource *res,
- uint32_t level, uint32_t layer,
- enum isl_aux_usage aux_usage,
- bool fast_clear_supported)
-{
- enum isl_aux_state aux_state = iris_resource_get_aux_state(res, level, layer);
-
- enum isl_aux_op resolve_op;
- if (res->aux.usage == ISL_AUX_USAGE_CCS_E) {
- resolve_op = get_ccs_e_resolve_op(aux_state, aux_usage,
- fast_clear_supported);
- } else {
- assert(res->aux.usage == ISL_AUX_USAGE_CCS_D);
- resolve_op = get_ccs_d_resolve_op(aux_state, aux_usage,
- fast_clear_supported);
- }
-
- if (resolve_op != ISL_AUX_OP_NONE) {
- iris_resolve_color(ice, batch, res, level, layer, resolve_op);
-
- switch (resolve_op) {
- case ISL_AUX_OP_FULL_RESOLVE:
- /* The CCS full resolve operation destroys the CCS and sets it to the
- * pass-through state. (You can also think of this as being both a
- * resolve and an ambiguate in one operation.)
- */
- iris_resource_set_aux_state(res, level, layer, 1,
- ISL_AUX_STATE_PASS_THROUGH);
- break;
-
- case ISL_AUX_OP_PARTIAL_RESOLVE:
- iris_resource_set_aux_state(res, level, layer, 1,
- ISL_AUX_STATE_COMPRESSED_NO_CLEAR);
- break;
-
- default:
- unreachable("Invalid resolve op");
- }
- }
-}
-
-static void
-iris_resource_finish_ccs_write(struct iris_context *ice,
- struct iris_resource *res,
- uint32_t level, uint32_t layer,
- enum isl_aux_usage aux_usage)
-{
- assert(aux_usage == ISL_AUX_USAGE_NONE ||
- aux_usage == ISL_AUX_USAGE_CCS_D ||
- aux_usage == ISL_AUX_USAGE_CCS_E);
-
- enum isl_aux_state aux_state =
- iris_resource_get_aux_state(res, level, layer);
-
- if (res->aux.usage == ISL_AUX_USAGE_CCS_E) {
- switch (aux_state) {
- case ISL_AUX_STATE_CLEAR:
- case ISL_AUX_STATE_PARTIAL_CLEAR:
- assert(aux_usage == ISL_AUX_USAGE_CCS_E ||
- aux_usage == ISL_AUX_USAGE_CCS_D);
-
- if (aux_usage == ISL_AUX_USAGE_CCS_E) {
- iris_resource_set_aux_state(res, level, layer, 1,
- ISL_AUX_STATE_COMPRESSED_CLEAR);
- } else if (aux_state != ISL_AUX_STATE_PARTIAL_CLEAR) {
- iris_resource_set_aux_state(res, level, layer, 1,
- ISL_AUX_STATE_PARTIAL_CLEAR);
- }
- break;
-
- case ISL_AUX_STATE_COMPRESSED_CLEAR:
- case ISL_AUX_STATE_COMPRESSED_NO_CLEAR:
- assert(aux_usage == ISL_AUX_USAGE_CCS_E);
- break; /* Nothing to do */
-
- case ISL_AUX_STATE_PASS_THROUGH:
- if (aux_usage == ISL_AUX_USAGE_CCS_E) {
- iris_resource_set_aux_state(res, level, layer, 1,
- ISL_AUX_STATE_COMPRESSED_NO_CLEAR);
- } else {
- /* Nothing to do */
- }
- break;
-
- case ISL_AUX_STATE_RESOLVED:
- case ISL_AUX_STATE_AUX_INVALID:
- unreachable("Invalid aux state for CCS_E");
- }
- } else {
- assert(res->aux.usage == ISL_AUX_USAGE_CCS_D);
- /* CCS_D is a bit simpler */
- switch (aux_state) {
- case ISL_AUX_STATE_CLEAR:
- assert(aux_usage == ISL_AUX_USAGE_CCS_D);
- iris_resource_set_aux_state(res, level, layer, 1,
- ISL_AUX_STATE_PARTIAL_CLEAR);
- break;
-
- case ISL_AUX_STATE_PARTIAL_CLEAR:
- assert(aux_usage == ISL_AUX_USAGE_CCS_D);
- break; /* Nothing to do */
-
- case ISL_AUX_STATE_PASS_THROUGH:
- /* Nothing to do */
- break;
-
- case ISL_AUX_STATE_COMPRESSED_CLEAR:
- case ISL_AUX_STATE_COMPRESSED_NO_CLEAR:
- case ISL_AUX_STATE_RESOLVED:
- case ISL_AUX_STATE_AUX_INVALID:
- unreachable("Invalid aux state for CCS_D");
- }
- }
-}
-
-static void
-iris_resource_prepare_mcs_access(struct iris_context *ice,
- struct iris_batch *batch,
- struct iris_resource *res,
- uint32_t layer,
- enum isl_aux_usage aux_usage,
- bool fast_clear_supported)
-{
- assert(aux_usage == ISL_AUX_USAGE_MCS);
-
- switch (iris_resource_get_aux_state(res, 0, layer)) {
- case ISL_AUX_STATE_CLEAR:
- case ISL_AUX_STATE_COMPRESSED_CLEAR:
- if (!fast_clear_supported) {
- iris_mcs_partial_resolve(ice, batch, res, layer, 1);
- iris_resource_set_aux_state(res, 0, layer, 1,
- ISL_AUX_STATE_COMPRESSED_NO_CLEAR);
- }
- break;
-
- case ISL_AUX_STATE_COMPRESSED_NO_CLEAR:
- break; /* Nothing to do */
-
- case ISL_AUX_STATE_RESOLVED:
- case ISL_AUX_STATE_PASS_THROUGH:
- case ISL_AUX_STATE_AUX_INVALID:
- case ISL_AUX_STATE_PARTIAL_CLEAR:
- unreachable("Invalid aux state for MCS");
- }
-}
-
-static void
-iris_resource_finish_mcs_write(struct iris_context *ice,
- struct iris_resource *res,
- uint32_t layer,
- enum isl_aux_usage aux_usage)
-{
- assert(aux_usage == ISL_AUX_USAGE_MCS);
-
- switch (iris_resource_get_aux_state(res, 0, layer)) {
- case ISL_AUX_STATE_CLEAR:
- iris_resource_set_aux_state(res, 0, layer, 1,
- ISL_AUX_STATE_COMPRESSED_CLEAR);
- break;
-
- case ISL_AUX_STATE_COMPRESSED_CLEAR:
- case ISL_AUX_STATE_COMPRESSED_NO_CLEAR:
- break; /* Nothing to do */
-
- case ISL_AUX_STATE_RESOLVED:
- case ISL_AUX_STATE_PASS_THROUGH:
- case ISL_AUX_STATE_AUX_INVALID:
- case ISL_AUX_STATE_PARTIAL_CLEAR:
- unreachable("Invalid aux state for MCS");
- }
-}
-
-static void
-iris_resource_prepare_hiz_access(struct iris_context *ice,
- struct iris_batch *batch,
- struct iris_resource *res,
- uint32_t level, uint32_t layer,
- enum isl_aux_usage aux_usage,
- bool fast_clear_supported)
-{
- assert(aux_usage == ISL_AUX_USAGE_NONE || aux_usage == ISL_AUX_USAGE_HIZ);
-
- enum isl_aux_op hiz_op = ISL_AUX_OP_NONE;
- switch (iris_resource_get_aux_state(res, level, layer)) {
- case ISL_AUX_STATE_CLEAR:
- case ISL_AUX_STATE_COMPRESSED_CLEAR:
- if (aux_usage != ISL_AUX_USAGE_HIZ || !fast_clear_supported)
- hiz_op = ISL_AUX_OP_FULL_RESOLVE;
- break;
-
- case ISL_AUX_STATE_COMPRESSED_NO_CLEAR:
- if (aux_usage != ISL_AUX_USAGE_HIZ)
- hiz_op = ISL_AUX_OP_FULL_RESOLVE;
- break;
-
- case ISL_AUX_STATE_PASS_THROUGH:
- case ISL_AUX_STATE_RESOLVED:
- break;
-
- case ISL_AUX_STATE_AUX_INVALID:
- if (aux_usage == ISL_AUX_USAGE_HIZ)
- hiz_op = ISL_AUX_OP_AMBIGUATE;
- break;
-
- case ISL_AUX_STATE_PARTIAL_CLEAR:
- unreachable("Invalid HiZ state");
- }
-
- if (hiz_op != ISL_AUX_OP_NONE) {
- iris_hiz_exec(ice, batch, res, level, layer, 1, hiz_op);
-
- switch (hiz_op) {
- case ISL_AUX_OP_FULL_RESOLVE:
- iris_resource_set_aux_state(res, level, layer, 1,
- ISL_AUX_STATE_RESOLVED);
- break;
-
- case ISL_AUX_OP_AMBIGUATE:
- /* The HiZ resolve operation is actually an ambiguate */
- iris_resource_set_aux_state(res, level, layer, 1,
- ISL_AUX_STATE_PASS_THROUGH);
- break;
-
- default:
- unreachable("Invalid HiZ op");
- }
- }
-}
-
-static void
-iris_resource_finish_hiz_write(struct iris_context *ice,
- struct iris_resource *res,
- uint32_t level, uint32_t layer,
- enum isl_aux_usage aux_usage)
-{
- assert(aux_usage == ISL_AUX_USAGE_NONE || aux_usage == ISL_AUX_USAGE_HIZ);
-
- switch (iris_resource_get_aux_state(res, level, layer)) {
- case ISL_AUX_STATE_CLEAR:
- assert(aux_usage == ISL_AUX_USAGE_HIZ);
- iris_resource_set_aux_state(res, level, layer, 1,
- ISL_AUX_STATE_COMPRESSED_CLEAR);
- break;
-
- case ISL_AUX_STATE_COMPRESSED_NO_CLEAR:
- case ISL_AUX_STATE_COMPRESSED_CLEAR:
- assert(aux_usage == ISL_AUX_USAGE_HIZ);
- break; /* Nothing to do */
-
- case ISL_AUX_STATE_RESOLVED:
- if (aux_usage == ISL_AUX_USAGE_HIZ) {
- iris_resource_set_aux_state(res, level, layer, 1,
- ISL_AUX_STATE_COMPRESSED_NO_CLEAR);
- } else {
- iris_resource_set_aux_state(res, level, layer, 1,
- ISL_AUX_STATE_AUX_INVALID);
- }
- break;
-
- case ISL_AUX_STATE_PASS_THROUGH:
- if (aux_usage == ISL_AUX_USAGE_HIZ) {
- iris_resource_set_aux_state(res, level, layer, 1,
- ISL_AUX_STATE_COMPRESSED_NO_CLEAR);
- }
- break;
-
- case ISL_AUX_STATE_AUX_INVALID:
- assert(aux_usage != ISL_AUX_USAGE_HIZ);
- break;
-
- case ISL_AUX_STATE_PARTIAL_CLEAR:
- unreachable("Invalid HiZ state");
- }
-}
-
void
iris_resource_prepare_access(struct iris_context *ice,
struct iris_batch *batch,
enum isl_aux_usage aux_usage,
bool fast_clear_supported)
{
- num_levels = miptree_level_range_length(res, start_level, num_levels);
-
- switch (res->aux.usage) {
- case ISL_AUX_USAGE_NONE:
- /* Nothing to do */
- break;
+ const uint32_t clamped_levels =
+ miptree_level_range_length(res, start_level, num_levels);
+ for (uint32_t l = 0; l < clamped_levels; l++) {
+ const uint32_t level = start_level + l;
+ if (!level_has_aux(res, level))
+ continue;
- case ISL_AUX_USAGE_MCS:
- assert(start_level == 0 && num_levels == 1);
const uint32_t level_layers =
- miptree_layer_range_length(res, 0, start_layer, num_layers);
+ miptree_layer_range_length(res, level, start_layer, num_layers);
for (uint32_t a = 0; a < level_layers; a++) {
- iris_resource_prepare_mcs_access(ice, batch, res, start_layer + a,
- aux_usage, fast_clear_supported);
- }
- break;
-
- case ISL_AUX_USAGE_CCS_D:
- case ISL_AUX_USAGE_CCS_E:
- for (uint32_t l = 0; l < num_levels; l++) {
- const uint32_t level = start_level + l;
- const uint32_t level_layers =
- miptree_layer_range_length(res, level, start_layer, num_layers);
- for (uint32_t a = 0; a < level_layers; a++) {
- iris_resource_prepare_ccs_access(ice, batch, res, level,
- start_layer + a,
- aux_usage, fast_clear_supported);
+ const uint32_t layer = start_layer + a;
+ const enum isl_aux_state aux_state =
+ iris_resource_get_aux_state(res, level, layer);
+ const enum isl_aux_op aux_op =
+ isl_aux_prepare_access(aux_state, aux_usage, fast_clear_supported);
+
+ if (aux_op == ISL_AUX_OP_NONE) {
+ /* Nothing to do here. */
+ } else if (isl_aux_usage_has_mcs(res->aux.usage)) {
+ assert(aux_op == ISL_AUX_OP_PARTIAL_RESOLVE);
+ iris_mcs_partial_resolve(ice, batch, res, layer, 1);
+ } else if (isl_aux_usage_has_hiz(res->aux.usage)) {
+ iris_hiz_exec(ice, batch, res, level, layer, 1, aux_op, false);
+ } else {
+ assert(isl_aux_usage_has_ccs(res->aux.usage));
+ iris_resolve_color(ice, batch, res, level, layer, aux_op);
}
- }
- break;
-
- case ISL_AUX_USAGE_HIZ:
- for (uint32_t l = 0; l < num_levels; l++) {
- const uint32_t level = start_level + l;
- if (!iris_resource_level_has_hiz(res, level))
- continue;
- const uint32_t level_layers =
- miptree_layer_range_length(res, level, start_layer, num_layers);
- for (uint32_t a = 0; a < level_layers; a++) {
- iris_resource_prepare_hiz_access(ice, batch, res, level,
- start_layer + a, aux_usage,
- fast_clear_supported);
- }
+ const enum isl_aux_state new_state =
+ isl_aux_state_transition_aux_op(aux_state, res->aux.usage, aux_op);
+ iris_resource_set_aux_state(ice, res, level, layer, 1, new_state);
}
- break;
-
- default:
- unreachable("Invalid aux usage");
}
}
uint32_t start_layer, uint32_t num_layers,
enum isl_aux_usage aux_usage)
{
- num_layers = miptree_layer_range_length(res, level, start_layer, num_layers);
-
- switch (res->aux.usage) {
- case ISL_AUX_USAGE_NONE:
- break;
-
- case ISL_AUX_USAGE_MCS:
- for (uint32_t a = 0; a < num_layers; a++) {
- iris_resource_finish_mcs_write(ice, res, start_layer + a,
- aux_usage);
- }
- break;
-
- case ISL_AUX_USAGE_CCS_D:
- case ISL_AUX_USAGE_CCS_E:
- for (uint32_t a = 0; a < num_layers; a++) {
- iris_resource_finish_ccs_write(ice, res, level, start_layer + a,
- aux_usage);
- }
- break;
-
- case ISL_AUX_USAGE_HIZ:
- if (!iris_resource_level_has_hiz(res, level))
- return;
-
- for (uint32_t a = 0; a < num_layers; a++) {
- iris_resource_finish_hiz_write(ice, res, level, start_layer + a,
- aux_usage);
- }
- break;
-
- default:
- unreachable("Invavlid aux usage");
+ if (!level_has_aux(res, level))
+ return;
+
+ const uint32_t level_layers =
+ miptree_layer_range_length(res, level, start_layer, num_layers);
+
+ for (uint32_t a = 0; a < level_layers; a++) {
+ const uint32_t layer = start_layer + a;
+ const enum isl_aux_state aux_state =
+ iris_resource_get_aux_state(res, level, layer);
+ const enum isl_aux_state new_aux_state =
+ isl_aux_state_transition_write(aux_state, aux_usage, false);
+ iris_resource_set_aux_state(ice, res, level, layer, 1, new_aux_state);
}
}
if (res->surf.usage & ISL_SURF_USAGE_DEPTH_BIT) {
assert(iris_resource_level_has_hiz(res, level));
- } else if (res->surf.usage & ISL_SURF_USAGE_STENCIL_BIT) {
- unreachable("Cannot get aux state for stencil");
} else {
assert(res->surf.samples == 1 ||
res->surf.msaa_layout == ISL_MSAA_LAYOUT_ARRAY);
}
void
-iris_resource_set_aux_state(struct iris_resource *res, uint32_t level,
+iris_resource_set_aux_state(struct iris_context *ice,
+ struct iris_resource *res, uint32_t level,
uint32_t start_layer, uint32_t num_layers,
enum isl_aux_state aux_state)
{
if (res->surf.usage & ISL_SURF_USAGE_DEPTH_BIT) {
assert(iris_resource_level_has_hiz(res, level));
- } else if (res->surf.usage & ISL_SURF_USAGE_STENCIL_BIT) {
- unreachable("Cannot set aux state for stencil");
} else {
assert(res->surf.samples == 1 ||
res->surf.msaa_layout == ISL_MSAA_LAYOUT_ARRAY);
for (unsigned a = 0; a < num_layers; a++) {
if (res->aux.state[level][start_layer + a] != aux_state) {
res->aux.state[level][start_layer + a] = aux_state;
- // XXX: dirty works differently
- // brw->ctx.NewDriverState |= BRW_NEW_AUX_STATE;
+ /* XXX: Need to track which bindings to make dirty */
+ ice->state.dirty |= IRIS_ALL_DIRTY_BINDINGS;
}
}
}
enum isl_aux_usage
iris_resource_texture_aux_usage(struct iris_context *ice,
const struct iris_resource *res,
- enum isl_format view_format,
- enum gen9_astc5x5_wa_tex_type astc5x5_wa_bits)
+ enum isl_format view_format)
{
struct iris_screen *screen = (void *) ice->ctx.screen;
struct gen_device_info *devinfo = &screen->devinfo;
- assert(devinfo->gen == 9 || astc5x5_wa_bits == 0);
-
- /* On gen9, ASTC 5x5 textures cannot live in the sampler cache along side
- * CCS or HiZ compressed textures. See gen9_apply_astc5x5_wa_flush() for
- * details.
- */
- if ((astc5x5_wa_bits & GEN9_ASTC5X5_WA_TEX_TYPE_ASTC5x5) &&
- res->aux.usage != ISL_AUX_USAGE_MCS)
- return ISL_AUX_USAGE_NONE;
-
switch (res->aux.usage) {
case ISL_AUX_USAGE_HIZ:
- if (sample_with_hiz(devinfo, res))
+ if (iris_sample_with_depth_aux(devinfo, res))
return ISL_AUX_USAGE_HIZ;
break;
+ case ISL_AUX_USAGE_HIZ_CCS:
+ assert(!iris_sample_with_depth_aux(devinfo, res));
+ return ISL_AUX_USAGE_NONE;
+
+ case ISL_AUX_USAGE_HIZ_CCS_WT:
+ if (iris_sample_with_depth_aux(devinfo, res))
+ return ISL_AUX_USAGE_HIZ_CCS_WT;
+ break;
+
case ISL_AUX_USAGE_MCS:
- return ISL_AUX_USAGE_MCS;
+ case ISL_AUX_USAGE_MCS_CCS:
+ case ISL_AUX_USAGE_STC_CCS:
+ return res->aux.usage;
case ISL_AUX_USAGE_CCS_D:
case ISL_AUX_USAGE_CCS_E:
* ISL_AUX_USAGE_NONE. This way, texturing won't even look at the
* aux surface and we can save some bandwidth.
*/
- if (!has_color_unresolved(res, 0, INTEL_REMAINING_LEVELS,
- 0, INTEL_REMAINING_LAYERS))
+ if (!iris_has_color_unresolved(res, 0, INTEL_REMAINING_LEVELS,
+ 0, INTEL_REMAINING_LAYERS))
return ISL_AUX_USAGE_NONE;
if (can_texture_with_ccs(devinfo, &ice->dbg, res, view_format))
struct iris_resource *res,
enum isl_format view_format,
uint32_t start_level, uint32_t num_levels,
- uint32_t start_layer, uint32_t num_layers,
- enum gen9_astc5x5_wa_tex_type astc5x5_wa_bits)
+ uint32_t start_layer, uint32_t num_layers)
{
enum isl_aux_usage aux_usage =
- iris_resource_texture_aux_usage(ice, res, view_format, astc5x5_wa_bits);
+ iris_resource_texture_aux_usage(ice, res, view_format);
- bool clear_supported = aux_usage != ISL_AUX_USAGE_NONE;
+ bool clear_supported = isl_aux_usage_has_fast_clears(aux_usage);
/* Clear color is specified as ints or floats and the conversion is done by
* the sampler. If we have a texture view, we would have to perform the
aux_usage, clear_supported);
}
-void
-iris_resource_prepare_image(struct iris_context *ice,
- struct iris_batch *batch,
- struct iris_resource *res)
-{
- /* The data port doesn't understand any compression */
- iris_resource_prepare_access(ice, batch, res, 0, INTEL_REMAINING_LEVELS,
- 0, INTEL_REMAINING_LAYERS,
- ISL_AUX_USAGE_NONE, false);
-}
-
enum isl_aux_usage
iris_resource_render_aux_usage(struct iris_context *ice,
struct iris_resource *res,
switch (res->aux.usage) {
case ISL_AUX_USAGE_MCS:
- return ISL_AUX_USAGE_MCS;
+ case ISL_AUX_USAGE_MCS_CCS:
+ return res->aux.usage;
case ISL_AUX_USAGE_CCS_D:
case ISL_AUX_USAGE_CCS_E:
* formats. However, there are issues with blending where it doesn't
* properly apply the sRGB curve to the clear color when blending.
*/
- /* XXX:
if (devinfo->gen >= 9 && blend_enabled &&
isl_format_is_srgb(render_format) &&
- !isl_color_value_is_zero_one(res->fast_clear_color, render_format))
+ !isl_color_value_is_zero_one(res->aux.clear_color, render_format))
return ISL_AUX_USAGE_NONE;
- */
if (res->aux.usage == ISL_AUX_USAGE_CCS_E &&
format_ccs_e_compat_with_resource(devinfo, res, render_format))
return ISL_AUX_USAGE_CCS_E;
- /* Otherwise, we have to fall back to CCS_D */
- return ISL_AUX_USAGE_CCS_D;
+ /* Otherwise, we try to fall back to CCS_D */
+ if (isl_format_supports_ccs_d(devinfo, render_format))
+ return ISL_AUX_USAGE_CCS_D;
default:
return ISL_AUX_USAGE_NONE;
{
iris_resource_prepare_access(ice, batch, res, level, 1, start_layer,
layer_count, aux_usage,
- aux_usage != ISL_AUX_USAGE_NONE);
+ isl_aux_usage_has_fast_clears(aux_usage));
}
void