#include "util/hash_table.h"
#include "util/set.h"
#include "iris_context.h"
+#include "compiler/nir/nir.h"
/**
* Disable auxiliary buffers if a renderbuffer is also bound as a texture
while (views) {
const int i = u_bit_scan(&views);
- struct iris_resource *res = (void *) shs->image[i].base.resource;
+ struct pipe_image_view *pview = &shs->image[i].base;
+ struct iris_resource *res = (void *) pview->resource;
if (res->base.target != PIPE_BUFFER) {
if (consider_framebuffer) {
disable_rb_aux_buffer(ice, draw_aux_buffer_disabled,
- res, 0, ~0, "as a shader image");
+ res, pview->u.tex.level, 1,
+ "as a shader image");
}
- iris_resource_prepare_image(ice, batch, res);
+ unsigned num_layers =
+ pview->u.tex.last_layer - pview->u.tex.first_layer + 1;
+
+ /* The data port doesn't understand any compression */
+ iris_resource_prepare_access(ice, batch, res,
+ pview->u.tex.level, 1,
+ pview->u.tex.first_layer, num_layers,
+ ISL_AUX_USAGE_NONE, false);
}
iris_cache_flush_for_read(batch, res->bo);
bool *draw_aux_buffer_disabled)
{
struct pipe_framebuffer_state *cso_fb = &ice->state.framebuffer;
+ struct iris_screen *screen = (void *) ice->ctx.screen;
+ struct gen_device_info *devinfo = &screen->devinfo;
+ struct iris_uncompiled_shader *ish =
+ ice->shaders.uncompiled[MESA_SHADER_FRAGMENT];
+ const nir_shader *nir = ish->nir;
if (ice->state.dirty & IRIS_DIRTY_DEPTH_BUFFER) {
struct pipe_surface *zs_surf = cso_fb->zsbuf;
}
}
+ if (devinfo->gen == 8 && nir->info.outputs_read != 0) {
+ for (unsigned i = 0; i < cso_fb->nr_cbufs; i++) {
+ if (cso_fb->cbufs[i]) {
+ struct iris_surface *surf = (void *) cso_fb->cbufs[i];
+ struct iris_resource *res = (void *) cso_fb->cbufs[i]->texture;
+
+ iris_resource_prepare_texture(ice, batch, res, surf->view.format,
+ surf->view.base_level, 1,
+ surf->view.base_array_layer,
+ surf->view.array_len,
+ 0);
+ }
+ }
+ }
+
if (ice->state.dirty & (IRIS_DIRTY_BINDINGS_FS | IRIS_DIRTY_BLEND_STATE)) {
for (unsigned i = 0; i < cso_fb->nr_cbufs; i++) {
struct iris_surface *surf = (void *) cso_fb->cbufs[i];
}
static bool
-sample_with_hiz(const struct gen_device_info *devinfo,
- const struct iris_resource *res)
+sample_with_depth_aux(const struct gen_device_info *devinfo,
+ const struct iris_resource *res)
{
- if (!devinfo->has_sample_with_hiz)
+ switch (res->aux.usage) {
+ case ISL_AUX_USAGE_HIZ:
+ if (devinfo->has_sample_with_hiz)
+ break;
return false;
-
- if (res->aux.usage != ISL_AUX_USAGE_HIZ)
+ case ISL_AUX_USAGE_HIZ_CCS:
+ /* Write through mode must have been enabled for prior writes. */
+ if (isl_surf_supports_hiz_ccs_wt(devinfo, &res->surf, res->aux.usage))
+ break;
return false;
+ default:
+ return false;
+ }
 /* It seems the hardware won't fall back to the depth buffer if some of
  * the mipmap levels aren't available in the HiZ buffer. So we need all
iris_emit_pipe_control_flush(batch, "hiz op: pre-flushes (2/2)",
PIPE_CONTROL_DEPTH_STALL);
- assert(res->aux.usage == ISL_AUX_USAGE_HIZ && res->aux.bo);
+ assert(isl_aux_usage_has_hiz(res->aux.usage) && res->aux.bo);
iris_batch_maybe_flush(batch, 1500);
struct blorp_surf surf;
iris_blorp_surf_for_resource(&ice->vtbl, &surf, &res->base,
- ISL_AUX_USAGE_HIZ, level, true);
+ res->aux.usage, level, true);
struct blorp_batch blorp_batch;
enum blorp_batch_flags flags = 0;
enum isl_aux_usage aux_usage,
bool fast_clear_supported)
{
- assert(aux_usage == ISL_AUX_USAGE_NONE || aux_usage == ISL_AUX_USAGE_HIZ);
+ assert(aux_usage == ISL_AUX_USAGE_NONE ||
+ aux_usage == ISL_AUX_USAGE_HIZ ||
+ aux_usage == ISL_AUX_USAGE_HIZ_CCS ||
+ aux_usage == ISL_AUX_USAGE_CCS_E);
enum isl_aux_op hiz_op = ISL_AUX_OP_NONE;
switch (iris_resource_get_aux_state(res, level, layer)) {
case ISL_AUX_STATE_CLEAR:
case ISL_AUX_STATE_COMPRESSED_CLEAR:
- if (aux_usage != ISL_AUX_USAGE_HIZ || !fast_clear_supported)
+ if (aux_usage == ISL_AUX_USAGE_NONE || !fast_clear_supported)
hiz_op = ISL_AUX_OP_FULL_RESOLVE;
break;
case ISL_AUX_STATE_COMPRESSED_NO_CLEAR:
- if (aux_usage != ISL_AUX_USAGE_HIZ)
+ if (aux_usage == ISL_AUX_USAGE_NONE)
hiz_op = ISL_AUX_OP_FULL_RESOLVE;
break;
break;
case ISL_AUX_STATE_AUX_INVALID:
- if (aux_usage == ISL_AUX_USAGE_HIZ)
+ if (aux_usage != ISL_AUX_USAGE_NONE)
hiz_op = ISL_AUX_OP_AMBIGUATE;
break;
uint32_t level, uint32_t layer,
enum isl_aux_usage aux_usage)
{
- assert(aux_usage == ISL_AUX_USAGE_NONE || aux_usage == ISL_AUX_USAGE_HIZ);
+ assert(aux_usage == ISL_AUX_USAGE_NONE ||
+ isl_aux_usage_has_hiz(aux_usage));
switch (iris_resource_get_aux_state(res, level, layer)) {
case ISL_AUX_STATE_CLEAR:
- assert(aux_usage == ISL_AUX_USAGE_HIZ);
+ assert(isl_aux_usage_has_hiz(aux_usage));
iris_resource_set_aux_state(ice, res, level, layer, 1,
ISL_AUX_STATE_COMPRESSED_CLEAR);
break;
case ISL_AUX_STATE_COMPRESSED_NO_CLEAR:
case ISL_AUX_STATE_COMPRESSED_CLEAR:
- assert(aux_usage == ISL_AUX_USAGE_HIZ);
+ assert(isl_aux_usage_has_hiz(aux_usage));
break; /* Nothing to do */
case ISL_AUX_STATE_RESOLVED:
- if (aux_usage == ISL_AUX_USAGE_HIZ) {
+ if (isl_aux_usage_has_hiz(aux_usage)) {
iris_resource_set_aux_state(ice, res, level, layer, 1,
ISL_AUX_STATE_COMPRESSED_NO_CLEAR);
} else {
break;
case ISL_AUX_STATE_PASS_THROUGH:
- if (aux_usage == ISL_AUX_USAGE_HIZ) {
+ if (isl_aux_usage_has_hiz(aux_usage)) {
iris_resource_set_aux_state(ice, res, level, layer, 1,
ISL_AUX_STATE_COMPRESSED_NO_CLEAR);
}
break;
case ISL_AUX_STATE_AUX_INVALID:
- assert(aux_usage != ISL_AUX_USAGE_HIZ);
+ assert(!isl_aux_usage_has_hiz(aux_usage));
break;
case ISL_AUX_STATE_PARTIAL_CLEAR:
break;
case ISL_AUX_USAGE_HIZ:
+ case ISL_AUX_USAGE_HIZ_CCS:
for (uint32_t l = 0; l < num_levels; l++) {
const uint32_t level = start_level + l;
if (!iris_resource_level_has_hiz(res, level))
break;
case ISL_AUX_USAGE_HIZ:
+ case ISL_AUX_USAGE_HIZ_CCS:
if (!iris_resource_level_has_hiz(res, level))
return;
switch (res->aux.usage) {
case ISL_AUX_USAGE_HIZ:
- if (sample_with_hiz(devinfo, res))
+ if (sample_with_depth_aux(devinfo, res))
return ISL_AUX_USAGE_HIZ;
break;
+ case ISL_AUX_USAGE_HIZ_CCS:
+ if (sample_with_depth_aux(devinfo, res))
+ return ISL_AUX_USAGE_CCS_E;
+ break;
+
case ISL_AUX_USAGE_MCS:
return ISL_AUX_USAGE_MCS;
aux_usage, clear_supported);
}
-void
-iris_resource_prepare_image(struct iris_context *ice,
- struct iris_batch *batch,
- struct iris_resource *res)
-{
- /* The data port doesn't understand any compression */
- iris_resource_prepare_access(ice, batch, res, 0, INTEL_REMAINING_LEVELS,
- 0, INTEL_REMAINING_LAYERS,
- ISL_AUX_USAGE_NONE, false);
-}
-
enum isl_aux_usage
iris_resource_render_aux_usage(struct iris_context *ice,
struct iris_resource *res,
format_ccs_e_compat_with_resource(devinfo, res, render_format))
return ISL_AUX_USAGE_CCS_E;
- /* Otherwise, we have to fall back to CCS_D */
- return ISL_AUX_USAGE_CCS_D;
+ /* Otherwise, we try to fall back to CCS_D */
+ if (isl_format_supports_ccs_d(devinfo, render_format))
+ return ISL_AUX_USAGE_CCS_D;
default:
return ISL_AUX_USAGE_NONE;