+
+ tex_cache_flush_hack(batch, src_fmt.fmt, src_res->surf.format);
+
+ if (info->mask & main_mask) {
+ iris_resource_finish_write(ice, dst_res, info->dst.level, info->dst.box.z,
+ info->dst.box.depth, dst_aux_usage);
+ }
+
+ if (stc_dst) {
+ iris_resource_finish_write(ice, stc_dst, info->dst.level, info->dst.box.z,
+ info->dst.box.depth, stc_dst_aux_usage);
+ }
+
+ iris_flush_and_dirty_for_history(ice, batch, (struct iris_resource *)
+ info->dst.resource,
+ PIPE_CONTROL_RENDER_TARGET_FLUSH,
+ "cache history: post-blit");
+}
+
+/* Choose the aux usage and fast-clear handling for one side of a copy.
+ *
+ * is_render_target is true when res is the destination (it will be written
+ * through the render pipe); false when it will be sampled as a texture.
+ * Results are returned through out_aux_usage / out_clear_supported.
+ */
+static void
+get_copy_region_aux_settings(struct iris_context *ice,
+                             struct iris_resource *res,
+                             enum isl_aux_usage *out_aux_usage,
+                             bool *out_clear_supported,
+                             bool is_render_target)
+{
+   struct iris_screen *screen = (void *) ice->ctx.screen;
+   const struct gen_device_info *devinfo = &screen->devinfo;
+
+   /* Start from the conservative answer and upgrade per aux type below. */
+   enum isl_aux_usage aux_usage = ISL_AUX_USAGE_NONE;
+   bool clear_supported = false;
+
+   switch (res->aux.usage) {
+   case ISL_AUX_USAGE_HIZ:
+   case ISL_AUX_USAGE_HIZ_CCS:
+   case ISL_AUX_USAGE_HIZ_CCS_WT:
+      /* Writes can use the resource's own aux usage; reads may have to fall
+       * back to whatever sampling from this format supports.
+       */
+      aux_usage = is_render_target ? res->aux.usage :
+                  iris_resource_texture_aux_usage(ice, res, res->surf.format);
+      clear_supported = aux_usage != ISL_AUX_USAGE_NONE;
+      break;
+   case ISL_AUX_USAGE_MCS:
+   case ISL_AUX_USAGE_MCS_CCS:
+   case ISL_AUX_USAGE_CCS_E:
+   case ISL_AUX_USAGE_GEN12_CCS_E:
+      aux_usage = res->aux.usage;
+      /* Prior to Gen9, fast-clear only supported 0/1 clear colors.  Since
+       * we're going to re-interpret the format as an integer format possibly
+       * with a different number of components, we can't handle clear colors
+       * until Gen9.
+       */
+      clear_supported = devinfo->gen >= 9;
+      break;
+   case ISL_AUX_USAGE_STC_CCS:
+      /* Stencil CCS is usable for the copy itself, but never for clears. */
+      aux_usage = res->aux.usage;
+      break;
+   default:
+      break;
+   }
+
+   *out_aux_usage = aux_usage;
+   *out_clear_supported = clear_supported;
+}
+
+/**
+ * Perform a GPU-based raw memory copy between compatible view classes.
+ *
+ * Does not perform any flushing - the new data may still be left in the
+ * render cache, and old data may remain in other caches.
+ *
+ * Wraps blorp_copy() and blorp_buffer_copy().
+ *
+ * \param dst_level / src_level  mip levels within dst / src.
+ * \param dstx,dsty,dstz         destination origin; src_box gives the source
+ *                               origin and the extent copied for both sides.
+ */
+void
+iris_copy_region(struct blorp_context *blorp,
+                 struct iris_batch *batch,
+                 struct pipe_resource *dst,
+                 unsigned dst_level,
+                 unsigned dstx, unsigned dsty, unsigned dstz,
+                 struct pipe_resource *src,
+                 unsigned src_level,
+                 const struct pipe_box *src_box)
+{
+   struct blorp_batch blorp_batch;
+   struct iris_context *ice = blorp->driver_ctx;
+   struct iris_screen *screen = (void *) ice->ctx.screen;
+   struct iris_resource *src_res = (void *) src;
+   struct iris_resource *dst_res = (void *) dst;
+
+   /* Pick aux usage and clear-color support for each side: src is read as a
+    * texture, dst is written as a render target.
+    */
+   enum isl_aux_usage src_aux_usage, dst_aux_usage;
+   bool src_clear_supported, dst_clear_supported;
+   get_copy_region_aux_settings(ice, src_res, &src_aux_usage,
+                                &src_clear_supported, false);
+   get_copy_region_aux_settings(ice, dst_res, &dst_aux_usage,
+                                &dst_clear_supported, true);
+
+   /* If the batch already touched the source, its contents may be sitting in
+    * the texture cache; work around stale-sampler-data issues before reading.
+    */
+   if (iris_batch_references(batch, src_res->bo))
+      tex_cache_flush_hack(batch, ISL_FORMAT_UNSUPPORTED, src_res->surf.format);
+
+   /* Buffer destinations: grow the tracked range of defined bytes. */
+   if (dst->target == PIPE_BUFFER)
+      util_range_add(&dst_res->base, &dst_res->valid_buffer_range, dstx, dstx + src_box->width);
+
+   if (dst->target == PIPE_BUFFER && src->target == PIPE_BUFFER) {
+      /* Raw buffer-to-buffer path: addresses only, no surface state. */
+      struct blorp_address src_addr = {
+         .buffer = iris_resource_bo(src), .offset = src_box->x,
+      };
+      struct blorp_address dst_addr = {
+         .buffer = iris_resource_bo(dst), .offset = dstx,
+         .reloc_flags = EXEC_OBJECT_WRITE,
+      };
+
+      iris_emit_buffer_barrier_for(batch, iris_resource_bo(src),
+                                   IRIS_DOMAIN_OTHER_READ);
+      iris_emit_buffer_barrier_for(batch, iris_resource_bo(dst),
+                                   IRIS_DOMAIN_RENDER_WRITE);
+
+      /* Make sure there's room for the copy commands before emitting. */
+      iris_batch_maybe_flush(batch, 1500);
+
+      iris_batch_sync_region_start(batch);
+      blorp_batch_init(&ice->blorp, &blorp_batch, batch, 0);
+      blorp_buffer_copy(&blorp_batch, src_addr, dst_addr, src_box->width);
+      blorp_batch_finish(&blorp_batch);
+      iris_batch_sync_region_end(batch);
+   } else {
+      // XXX: what about one surface being a buffer and not the other?
+
+      struct blorp_surf src_surf, dst_surf;
+      iris_blorp_surf_for_resource(&screen->isl_dev, &src_surf,
+                                   src, src_aux_usage, src_level, false);
+      iris_blorp_surf_for_resource(&screen->isl_dev, &dst_surf,
+                                   dst, dst_aux_usage, dst_level, true);
+
+      /* Resolve/track aux state so the chosen aux usages are legal for the
+       * levels/layers we're about to touch.
+       */
+      iris_resource_prepare_access(ice, src_res, src_level, 1,
+                                   src_box->z, src_box->depth,
+                                   src_aux_usage, src_clear_supported);
+      iris_resource_prepare_access(ice, dst_res, dst_level, 1,
+                                   dstz, src_box->depth,
+                                   dst_aux_usage, dst_clear_supported);
+
+      iris_emit_buffer_barrier_for(batch, iris_resource_bo(src),
+                                   IRIS_DOMAIN_OTHER_READ);
+      iris_emit_buffer_barrier_for(batch, iris_resource_bo(dst),
+                                   IRIS_DOMAIN_RENDER_WRITE);
+
+      blorp_batch_init(&ice->blorp, &blorp_batch, batch, 0);
+
+      /* blorp_copy() handles one layer at a time; walk the box depth. */
+      for (int slice = 0; slice < src_box->depth; slice++) {
+         iris_batch_maybe_flush(batch, 1500);
+
+         iris_batch_sync_region_start(batch);
+         blorp_copy(&blorp_batch, &src_surf, src_level, src_box->z + slice,
+                    &dst_surf, dst_level, dstz + slice,
+                    src_box->x, src_box->y, dstx, dsty,
+                    src_box->width, src_box->height);
+         iris_batch_sync_region_end(batch);
+      }
+      blorp_batch_finish(&blorp_batch);
+
+      /* Mark the written layers' aux state as modified.  NOTE(review): the
+       * buffer path above skips this — presumably because buffers carry no
+       * aux surface; confirm against iris_resource_finish_write().
+       */
+      iris_resource_finish_write(ice, dst_res, dst_level, dstz,
+                                 src_box->depth, dst_aux_usage);
+   }
+
+   /* Counterpart of the pre-copy hack: invalidate again so later sampling
+    * of the source doesn't see stale texture-cache data.
+    */
+   tex_cache_flush_hack(batch, ISL_FORMAT_UNSUPPORTED, src_res->surf.format);
+}
+
+static struct iris_batch *
+get_preferred_batch(struct iris_context *ice, struct iris_bo *bo)
+{
+ /* If the compute batch is already using this buffer, we'd prefer to
+ * continue queueing in the compute batch.
+ */
+ if (iris_batch_references(&ice->batches[IRIS_BATCH_COMPUTE], bo))
+ return &ice->batches[IRIS_BATCH_COMPUTE];
+
+ /* Otherwise default to the render batch. */
+ return &ice->batches[IRIS_BATCH_RENDER];