   brw_emit_end_of_pipe_sync(brw, PIPE_CONTROL_RENDER_TARGET_FLUSH);
}
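+/**
+ * Perform a partial resolve on the MCS buffer of a multisampled miptree.
+ *
+ * Any fast-cleared blocks in the given layer range are resolved so that the
+ * surface no longer depends on the clear color, while the data remains
+ * MCS-compressed.
+ */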
+void
+brw_blorp_mcs_partial_resolve(struct brw_context *brw,
+                              struct intel_mipmap_tree *mt,
+                              uint32_t start_layer, uint32_t num_layers)
+{
+   DBG("%s to mt %p layers %u-%u\n", __FUNCTION__, mt,
+       start_layer, start_layer + num_layers - 1);
+
+   const mesa_format format = _mesa_get_srgb_format_linear(mt->format);
+   enum isl_format isl_format = brw_blorp_to_isl_format(brw, format, true);
+
+   struct isl_surf isl_tmp[1];
+   struct blorp_surf surf;
+   uint32_t level = 0;
+   blorp_surf_for_miptree(brw, &surf, mt, true, false, 0,
+                          &level, start_layer, num_layers, isl_tmp);
+
+   struct blorp_batch batch;
+   blorp_batch_init(&brw->blorp, &batch, brw, 0);
+   blorp_mcs_partial_resolve(&batch, &surf, isl_format,
+                             start_layer, num_layers);
+   blorp_batch_finish(&batch);
+}
+
/**
* Perform a HiZ or depth resolve operation.
*
                        unsigned level, unsigned layer,
                        enum blorp_fast_clear_op resolve_op);
+void
+brw_blorp_mcs_partial_resolve(struct brw_context *brw,
+                              struct intel_mipmap_tree *mt,
+                              uint32_t start_layer, uint32_t num_layers);
+
void
intel_hiz_exec(struct brw_context *brw, struct intel_mipmap_tree *mt,
               unsigned int level, unsigned int start_layer,
   }
}
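+/* Prepare one layer of a multisampled (MCS) surface for an access that may
+ * not support fast clears.  If the layer is currently in a fast-clear state
+ * and the access cannot handle clear values, its clear blocks are partially
+ * resolved first.
+ */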
+static void
+intel_miptree_prepare_mcs_access(struct brw_context *brw,
+                                 struct intel_mipmap_tree *mt,
+                                 uint32_t layer,
+                                 bool mcs_supported,
+                                 bool fast_clear_supported)
+{
+   switch (intel_miptree_get_aux_state(mt, 0, layer)) {
+   case ISL_AUX_STATE_CLEAR:
+   case ISL_AUX_STATE_COMPRESSED_CLEAR:
+      assert(mcs_supported);
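+      /* The access can't handle fast-clear values; partially resolve the
+       * clear blocks so the data no longer depends on the clear color but
+       * stays MCS-compressed.
+       */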
+      if (!fast_clear_supported) {
+         brw_blorp_mcs_partial_resolve(brw, mt, layer, 1);
+         intel_miptree_set_aux_state(brw, mt, 0, layer, 1,
+                                     ISL_AUX_STATE_COMPRESSED_NO_CLEAR);
+      }
+      break;
+
+   case ISL_AUX_STATE_COMPRESSED_NO_CLEAR:
+      assert(mcs_supported);
+      break; /* Nothing to do */
+
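+   /* A multisampled surface is never resolved away from its MCS, so these
+    * states should not be reachable here.
+    */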
+   case ISL_AUX_STATE_RESOLVED:
+   case ISL_AUX_STATE_PASS_THROUGH:
+   case ISL_AUX_STATE_AUX_INVALID:
+      unreachable("Invalid aux state for MCS");
+   }
+}
+
static void
intel_miptree_finish_mcs_write(struct brw_context *brw,
                               struct intel_mipmap_tree *mt,
      break;
   case ISL_AUX_STATE_COMPRESSED_CLEAR:
+   case ISL_AUX_STATE_COMPRESSED_NO_CLEAR:
      assert(written_with_mcs);
      break; /* Nothing to do */
-   case ISL_AUX_STATE_COMPRESSED_NO_CLEAR:
   case ISL_AUX_STATE_RESOLVED:
   case ISL_AUX_STATE_PASS_THROUGH:
   case ISL_AUX_STATE_AUX_INVALID:
         return;
      if (mt->surf.samples > 1) {
-         /* Nothing to do for MSAA */
-         assert(aux_supported && fast_clear_supported);
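+         /* Multisampled surfaces only have a single miplevel, so prepare
+          * the requested layers of level zero one at a time.
+          */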
+         assert(start_level == 0 && num_levels == 1);
+         const uint32_t level_layers =
+            miptree_layer_range_length(mt, 0, start_layer, num_layers);
+         for (uint32_t a = 0; a < level_layers; a++) {
+            intel_miptree_prepare_mcs_access(brw, mt, start_layer + a,
+                                             aux_supported,
+                                             fast_clear_supported);
+         }
      } else {
         for (uint32_t l = 0; l < num_levels; l++) {
            const uint32_t level = start_level + l;