#include "pipe/p_state.h"
#include "pipe/p_context.h"
#include "pipe/p_screen.h"
-#include "util/u_format.h"
+#include "util/format/u_format.h"
#include "util/u_inlines.h"
#include "util/ralloc.h"
#include "intel/blorp/blorp.h"
* clipping 4 * 2 = 8 > 5 in the src.
*/
+ if (*src_x0 == *src_x1 || *src_y0 == *src_y1
+ || *dst_x0 == *dst_x1 || *dst_y0 == *dst_y1)
+ return true;
+
float scale_x = (float) (*src_x1 - *src_x0) / (*dst_x1 - *dst_x0);
float scale_y = (float) (*src_y1 - *src_y0) / (*dst_y1 - *dst_y0);
clip_coordinates(mirror_y, src_y1, dst_y1, dst_y0,
clip_dst_y1, clip_dst_y0, scale_y, false);
- return false;
+ /* Check for invalid bounds
+ * Can't blit for 0-dimensions
+ */
+ return *src_x0 == *src_x1 || *src_y0 == *src_y1
+ || *dst_x0 == *dst_x1 || *dst_y0 == *dst_y1;
}
+/* Fill out a blorp_surf for the given pipe_resource.
+ *
+ * Demotes @aux_usage to ISL_AUX_USAGE_NONE when HiZ is requested but the
+ * selected @level has no HiZ.  NOTE(review): this hunk appears to be
+ * missing the lines that open the surf->aux_addr assignment (and its
+ * guarding condition) — verify against the full file before applying.
+ */
void
-iris_blorp_surf_for_resource(struct blorp_surf *surf,
+iris_blorp_surf_for_resource(struct iris_vtable *vtbl,
+                             struct isl_device *isl_dev,
+                             struct blorp_surf *surf,
                              struct pipe_resource *p_res,
                              enum isl_aux_usage aux_usage,
+                             unsigned level,
                              bool is_render_target)
{
   struct iris_resource *res = (void *) p_res;
+   assert(!iris_resource_unfinished_aux_import(res));
+
+   if (isl_aux_usage_has_hiz(aux_usage) &&
+       !iris_resource_level_has_hiz(res, level))
+      aux_usage = ISL_AUX_USAGE_NONE;
+
   *surf = (struct blorp_surf) {
      .surf = &res->surf,
      .addr = (struct blorp_address) {
         .buffer = res->bo,
-         .offset = 0, // XXX: ???
+         .offset = res->offset,
         .reloc_flags = is_render_target ? EXEC_OBJECT_WRITE : 0,
-         .mocs = I915_MOCS_CACHED, // XXX: BDW MOCS, PTE MOCS
+         .mocs = iris_mocs(res->bo, isl_dev),
      },
      .aux_usage = aux_usage,
   };
         .buffer = res->aux.bo,
         .offset = res->aux.offset,
         .reloc_flags = is_render_target ? EXEC_OBJECT_WRITE : 0,
-         .mocs = I915_MOCS_CACHED,
+         .mocs = iris_mocs(res->bo, isl_dev),
+      };
+      surf->clear_color =
+         iris_resource_get_clear_color(res, NULL, NULL);
+      surf->clear_color_addr = (struct blorp_address) {
+         .buffer = res->aux.clear_color_bo,
+         .offset = res->aux.clear_color_offset,
+         .reloc_flags = 0,
+         .mocs = iris_mocs(res->aux.clear_color_bo, isl_dev),
      };
   }
+}
+
+/* Return true if @format is a (supported) ASTC-compressed format. */
+static bool
+is_astc(enum isl_format format)
+{
+   return format != ISL_FORMAT_UNSUPPORTED &&
+          isl_format_get_layout(format)->txc == ISL_TXC_ASTC;
+}
- // XXX: ASTC
+/* Emit a CS stall plus texture-cache invalidate when a surface is about to
+ * be read through a view format that differs from the surface's own format
+ * (see the workaround comment below for why).
+ */
+static void
+tex_cache_flush_hack(struct iris_batch *batch,
+                     enum isl_format view_format,
+                     enum isl_format surf_format)
+{
+   const struct gen_device_info *devinfo = &batch->screen->devinfo;
+
+   /* The WaSamplerCacheFlushBetweenRedescribedSurfaceReads workaround says:
+    *
+    * "Currently Sampler assumes that a surface would not have two
+    * different format associate with it. It will not properly cache
+    * the different views in the MT cache, causing a data corruption."
+    *
+    * We may need to handle this for texture views in general someday, but
+    * for now we handle it here, as it hurts copies and blits particularly
+    * badly because they often reinterpret formats.
+    *
+    * If the BO hasn't been referenced yet this batch, we assume that the
+    * texture cache doesn't contain any relevant data nor need flushing.
+    *
+    * Icelake (Gen11+) claims to fix this issue, but seems to still have
+    * issues with ASTC formats.
+    */
+   bool need_flush = devinfo->gen >= 11 ?
+      is_astc(surf_format) != is_astc(view_format) :
+      view_format != surf_format;
+   if (!need_flush)
+      return;
+
+   const char *reason =
+      "workaround: WaSamplerCacheFlushBetweenRedescribedSurfaceReads";
+
+   iris_emit_pipe_control_flush(batch, reason, PIPE_CONTROL_CS_STALL);
+   iris_emit_pipe_control_flush(batch, reason,
+                                PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
+}
+
+/* Pick the aux usage for a BLORP write to @res with @render_format.
+ *
+ * Depth/stencil destinations must be written with the resource's own
+ * format, so their existing aux usage is kept; color destinations defer
+ * to iris_resource_render_aux_usage().
+ *
+ * NOTE(review): the closing brace below lacks a '+' marker in this hunk —
+ * likely a transcription artifact; verify against the full patch.
+ */
+static enum isl_aux_usage
+iris_resource_blorp_write_aux_usage(struct iris_context *ice,
+                                    struct iris_resource *res,
+                                    enum isl_format render_format)
+{
+   if (res->surf.usage & (ISL_SURF_USAGE_DEPTH_BIT |
+                          ISL_SURF_USAGE_STENCIL_BIT)) {
+      assert(render_format == res->surf.format);
+      return res->aux.usage;
+   } else {
+      return iris_resource_render_aux_usage(ice, res, render_format,
+                                            false, false);
+   }
}
/**
struct iris_context *ice = (void *) ctx;
struct iris_screen *screen = (struct iris_screen *)ctx->screen;
const struct gen_device_info *devinfo = &screen->devinfo;
+ struct iris_batch *batch = &ice->batches[IRIS_BATCH_RENDER];
enum blorp_batch_flags blorp_flags = 0;
struct iris_resource *src_res = (void *) info->src.resource;
struct iris_resource *dst_res = (void *) info->dst.resource;
iris_format_for_usage(devinfo, info->src.format,
ISL_SURF_USAGE_TEXTURE_BIT);
enum isl_aux_usage src_aux_usage =
- iris_resource_texture_aux_usage(ice, src_res, src_fmt.fmt, 0);
+ iris_resource_texture_aux_usage(ice, src_res, src_fmt.fmt);
- if (src_aux_usage == ISL_AUX_USAGE_HIZ)
- src_aux_usage = ISL_AUX_USAGE_NONE;
+ if (iris_resource_level_has_hiz(src_res, info->src.level))
+ assert(src_res->surf.format == src_fmt.fmt);
- bool src_clear_supported = src_aux_usage != ISL_AUX_USAGE_NONE &&
+ bool src_clear_supported = isl_aux_usage_has_fast_clears(src_aux_usage) &&
src_res->surf.format == src_fmt.fmt;
- iris_resource_prepare_access(ice, src_res, info->src.level, 1,
+ iris_resource_prepare_access(ice, batch, src_res, info->src.level, 1,
info->src.box.z, info->src.box.depth,
src_aux_usage, src_clear_supported);
iris_format_for_usage(devinfo, info->dst.format,
ISL_SURF_USAGE_RENDER_TARGET_BIT);
enum isl_aux_usage dst_aux_usage =
- iris_resource_render_aux_usage(ice, dst_res, dst_fmt.fmt, false, false);
- bool dst_clear_supported = dst_aux_usage != ISL_AUX_USAGE_NONE;
+ iris_resource_blorp_write_aux_usage(ice, dst_res, dst_fmt.fmt);
+ bool dst_clear_supported = isl_aux_usage_has_fast_clears(dst_aux_usage);
struct blorp_surf src_surf, dst_surf;
- iris_blorp_surf_for_resource(&src_surf, info->src.resource,
- ISL_AUX_USAGE_NONE, false);
- iris_blorp_surf_for_resource(&dst_surf, info->dst.resource,
- ISL_AUX_USAGE_NONE, true);
-
- iris_resource_prepare_access(ice, dst_res, info->dst.level, 1,
+ iris_blorp_surf_for_resource(&ice->vtbl, &screen->isl_dev, &src_surf,
+ info->src.resource, src_aux_usage,
+ info->src.level, false);
+ iris_blorp_surf_for_resource(&ice->vtbl, &screen->isl_dev, &dst_surf,
+ info->dst.resource, dst_aux_usage,
+ info->dst.level, true);
+
+ iris_resource_prepare_access(ice, batch, dst_res, info->dst.level, 1,
info->dst.box.z, info->dst.box.depth,
dst_aux_usage, dst_clear_supported);
filter = BLORP_FILTER_NEAREST;
}
- struct iris_batch *batch = &ice->batches[IRIS_BATCH_RENDER];
+ if (iris_batch_references(batch, src_res->bo))
+ tex_cache_flush_hack(batch, src_fmt.fmt, src_res->surf.format);
+
+ if (dst_res->base.target == PIPE_BUFFER)
+ util_range_add(&dst_res->base, &dst_res->valid_buffer_range, dst_x0, dst_x1);
struct blorp_batch blorp_batch;
blorp_batch_init(&ice->blorp, &blorp_batch, batch, blorp_flags);
unsigned main_mask;
- if (info->dst.format == PIPE_FORMAT_S8_UINT)
- main_mask = PIPE_MASK_S;
- else if (util_format_is_depth_or_stencil(info->dst.format))
+ if (util_format_is_depth_or_stencil(info->dst.format))
main_mask = PIPE_MASK_Z;
else
main_mask = PIPE_MASK_RGBA;
&src_surf, info->src.level, info->src.box.z + slice,
src_fmt.fmt, src_fmt.swizzle,
&dst_surf, info->dst.level, info->dst.box.z + slice,
- dst_fmt.fmt, ISL_SWIZZLE_IDENTITY,
+ dst_fmt.fmt, dst_fmt.swizzle,
src_x0, src_y0, src_x1, src_y1,
dst_x0, dst_y0, dst_x1, dst_y1,
filter, mirror_x, mirror_y);
}
}
+ struct iris_resource *stc_dst = NULL;
+ enum isl_aux_usage stc_src_aux_usage, stc_dst_aux_usage;
if ((info->mask & PIPE_MASK_S) &&
- util_format_is_depth_and_stencil(info->dst.format) &&
+ util_format_has_stencil(util_format_description(info->dst.format)) &&
util_format_has_stencil(util_format_description(info->src.format))) {
- struct iris_resource *src_res, *dst_res, *junk;
+ struct iris_resource *src_res, *junk;
+ struct blorp_surf src_surf, dst_surf;
iris_get_depth_stencil_resources(info->src.resource, &junk, &src_res);
- iris_get_depth_stencil_resources(info->dst.resource, &junk, &dst_res);
- iris_blorp_surf_for_resource(&src_surf, &src_res->base,
- ISL_AUX_USAGE_NONE, false);
- iris_blorp_surf_for_resource(&dst_surf, &dst_res->base,
- ISL_AUX_USAGE_NONE, true);
+ iris_get_depth_stencil_resources(info->dst.resource, &junk, &stc_dst);
+
+ struct iris_format_info src_fmt =
+ iris_format_for_usage(devinfo, src_res->base.format,
+ ISL_SURF_USAGE_TEXTURE_BIT);
+ stc_src_aux_usage =
+ iris_resource_texture_aux_usage(ice, src_res, src_fmt.fmt);
+
+ struct iris_format_info dst_fmt =
+ iris_format_for_usage(devinfo, stc_dst->base.format,
+ ISL_SURF_USAGE_RENDER_TARGET_BIT);
+ stc_dst_aux_usage =
+ iris_resource_blorp_write_aux_usage(ice, stc_dst, dst_fmt.fmt);
+
+ iris_resource_prepare_access(ice, batch, src_res, info->src.level, 1,
+ info->src.box.z, info->src.box.depth,
+ stc_src_aux_usage, false);
+ iris_resource_prepare_access(ice, batch, stc_dst, info->dst.level, 1,
+ info->dst.box.z, info->dst.box.depth,
+ stc_dst_aux_usage, false);
+ iris_blorp_surf_for_resource(&ice->vtbl, &screen->isl_dev, &src_surf,
+ &src_res->base, stc_src_aux_usage,
+ info->src.level, false);
+ iris_blorp_surf_for_resource(&ice->vtbl, &screen->isl_dev, &dst_surf,
+ &stc_dst->base, stc_dst_aux_usage,
+ info->dst.level, true);
for (int slice = 0; slice < info->dst.box.depth; slice++) {
iris_batch_maybe_flush(batch, 1500);
blorp_batch_finish(&blorp_batch);
- iris_resource_finish_write(ice, dst_res, info->dst.level, info->dst.box.z,
- info->dst.box.depth, dst_aux_usage);
+ tex_cache_flush_hack(batch, src_fmt.fmt, src_res->surf.format);
+
+ if (info->mask & main_mask) {
+ iris_resource_finish_write(ice, dst_res, info->dst.level, info->dst.box.z,
+ info->dst.box.depth, dst_aux_usage);
+ }
+
+ if (stc_dst) {
+ iris_resource_finish_write(ice, stc_dst, info->dst.level, info->dst.box.z,
+ info->dst.box.depth, stc_dst_aux_usage);
+ }
iris_flush_and_dirty_for_history(ice, batch, (struct iris_resource *)
- info->dst.resource);
+ info->dst.resource,
+ PIPE_CONTROL_RENDER_TARGET_FLUSH,
+ "cache history: post-blit");
}
+/* Choose the aux usage and whether fast clears are allowed for one side of
+ * a copy-region operation (@is_render_target selects which side).
+ * NOTE(review): the comment inside the MCS/CCS_E case appears truncated in
+ * this hunk.
+ */
static void
-get_copy_region_aux_settings(const struct gen_device_info *devinfo,
+get_copy_region_aux_settings(struct iris_context *ice,
                              struct iris_resource *res,
                              enum isl_aux_usage *out_aux_usage,
-                             bool *out_clear_supported)
+                             bool *out_clear_supported,
+                             bool is_render_target)
{
+   struct iris_screen *screen = (void *) ice->ctx.screen;
+   const struct gen_device_info *devinfo = &screen->devinfo;
+
   switch (res->aux.usage) {
+   case ISL_AUX_USAGE_HIZ:
+   case ISL_AUX_USAGE_HIZ_CCS:
+   case ISL_AUX_USAGE_HIZ_CCS_WT:
+      /* Writes keep the resource's HiZ usage; reads ask the texture path. */
+      if (is_render_target) {
+         *out_aux_usage = res->aux.usage;
+      } else {
+         *out_aux_usage = iris_resource_texture_aux_usage(ice, res,
+                                                          res->surf.format);
+      }
+      *out_clear_supported = (*out_aux_usage != ISL_AUX_USAGE_NONE);
+      break;
   case ISL_AUX_USAGE_MCS:
+   case ISL_AUX_USAGE_MCS_CCS:
   case ISL_AUX_USAGE_CCS_E:
      *out_aux_usage = res->aux.usage;
      /* Prior to Gen9, fast-clear only supported 0/1 clear colors. Since
       */
      *out_clear_supported = devinfo->gen >= 9;
      break;
+   case ISL_AUX_USAGE_STC_CCS:
+      *out_aux_usage = res->aux.usage;
+      *out_clear_supported = false;
+      break;
   default:
      *out_aux_usage = ISL_AUX_USAGE_NONE;
      *out_clear_supported = false;
   }
/**
- * The pipe->resource_copy_region() driver hook.
+ * Perform a GPU-based raw memory copy between compatible view classes.
*
- * This implements ARB_copy_image semantics - a raw memory copy between
- * compatible view classes.
+ * Does not perform any flushing - the new data may still be left in the
+ * render cache, and old data may remain in other caches.
+ *
+ * Wraps blorp_copy() and blorp_buffer_copy().
*/
-static void
-iris_resource_copy_region(struct pipe_context *ctx,
- struct pipe_resource *dst,
- unsigned dst_level,
- unsigned dstx, unsigned dsty, unsigned dstz,
- struct pipe_resource *src,
- unsigned src_level,
- const struct pipe_box *src_box)
+void
+iris_copy_region(struct blorp_context *blorp,
+                 struct iris_batch *batch,
+                 struct pipe_resource *dst,
+                 unsigned dst_level,
+                 unsigned dstx, unsigned dsty, unsigned dstz,
+                 struct pipe_resource *src,
+                 unsigned src_level,
+                 const struct pipe_box *src_box)
{
-   struct iris_screen *screen = (void *) ctx->screen;
-   const struct gen_device_info *devinfo = &screen->devinfo;
   struct blorp_batch blorp_batch;
-   struct iris_context *ice = (void *) ctx;
-   struct iris_batch *batch = &ice->batches[IRIS_BATCH_RENDER];
+   struct iris_context *ice = blorp->driver_ctx;
+   struct iris_screen *screen = (void *) ice->ctx.screen;
   struct iris_resource *src_res = (void *) src;
   struct iris_resource *dst_res = (void *) dst;
   enum isl_aux_usage src_aux_usage, dst_aux_usage;
   bool src_clear_supported, dst_clear_supported;
-   get_copy_region_aux_settings(devinfo, src_res, &src_aux_usage,
-                                &src_clear_supported);
-   get_copy_region_aux_settings(devinfo, dst_res, &dst_aux_usage,
-                                &dst_clear_supported);
+   get_copy_region_aux_settings(ice, src_res, &src_aux_usage,
+                                &src_clear_supported, false);
+   get_copy_region_aux_settings(ice, dst_res, &dst_aux_usage,
+                                &dst_clear_supported, true);
-   iris_resource_prepare_access(ice, src_res, src_level, 1,
-                                src_box->z, src_box->depth,
-                                src_aux_usage, src_clear_supported);
-   iris_resource_prepare_access(ice, dst_res, dst_level, 1,
-                                dstz, src_box->depth,
-                                dst_aux_usage, dst_clear_supported);
+   /* See tex_cache_flush_hack(): flush if this batch already read src_res,
+    * possibly through a different format.
+    */
+   if (iris_batch_references(batch, src_res->bo))
+      tex_cache_flush_hack(batch, ISL_FORMAT_UNSUPPORTED, src_res->surf.format);
-   blorp_batch_init(&ice->blorp, &blorp_batch, batch, 0);
+   /* Record the written range as valid for buffer destinations. */
+   if (dst->target == PIPE_BUFFER)
+      util_range_add(&dst_res->base, &dst_res->valid_buffer_range, dstx, dstx + src_box->width);
   if (dst->target == PIPE_BUFFER && src->target == PIPE_BUFFER) {
      struct blorp_address src_addr = {
      };
      struct blorp_address dst_addr = {
         .buffer = iris_resource_bo(dst), .offset = dstx,
+         .reloc_flags = EXEC_OBJECT_WRITE,
      };
      iris_batch_maybe_flush(batch, 1500);
+      blorp_batch_init(&ice->blorp, &blorp_batch, batch, 0);
      blorp_buffer_copy(&blorp_batch, src_addr, dst_addr, src_box->width);
+      blorp_batch_finish(&blorp_batch);
   } else {
      // XXX: what about one surface being a buffer and not the other?
      struct blorp_surf src_surf, dst_surf;
-      iris_blorp_surf_for_resource(&src_surf, src, src_aux_usage, false);
-      iris_blorp_surf_for_resource(&dst_surf, dst, dst_aux_usage, true);
+      iris_blorp_surf_for_resource(&ice->vtbl, &screen->isl_dev, &src_surf,
+                                   src, src_aux_usage, src_level, false);
+      iris_blorp_surf_for_resource(&ice->vtbl, &screen->isl_dev, &dst_surf,
+                                   dst, dst_aux_usage, dst_level, true);
+
+      iris_resource_prepare_access(ice, batch, src_res, src_level, 1,
+                                   src_box->z, src_box->depth,
+                                   src_aux_usage, src_clear_supported);
+      iris_resource_prepare_access(ice, batch, dst_res, dst_level, 1,
+                                   dstz, src_box->depth,
+                                   dst_aux_usage, dst_clear_supported);
+
+      blorp_batch_init(&ice->blorp, &blorp_batch, batch, 0);
      for (int slice = 0; slice < src_box->depth; slice++) {
         iris_batch_maybe_flush(batch, 1500);
                    src_box->x, src_box->y, dstx, dsty,
                    src_box->width, src_box->height);
      }
+      blorp_batch_finish(&blorp_batch);
+
+      iris_resource_finish_write(ice, dst_res, dst_level, dstz,
+                                 src_box->depth, dst_aux_usage);
   }
-   blorp_batch_finish(&blorp_batch);
+   tex_cache_flush_hack(batch, ISL_FORMAT_UNSUPPORTED, src_res->surf.format);
+}
+
+/* Choose the batch on which to queue a small copy touching @bo. */
+static struct iris_batch *
+get_preferred_batch(struct iris_context *ice, struct iris_bo *bo)
+{
+   /* If the compute batch is already using this buffer, we'd prefer to
+    * continue queueing in the compute batch.
+    */
+   if (iris_batch_references(&ice->batches[IRIS_BATCH_COMPUTE], bo))
+      return &ice->batches[IRIS_BATCH_COMPUTE];
+
+   /* Otherwise default to the render batch. */
+   return &ice->batches[IRIS_BATCH_RENDER];
+}
- iris_resource_finish_write(ice, dst_res, dst_level, dstz, src_box->depth,
- dst_aux_usage);
- iris_flush_and_dirty_for_history(ice, batch, (struct iris_resource *) dst);
+/**
+ * The pipe->resource_copy_region() driver hook.
+ *
+ * This implements ARB_copy_image semantics - a raw memory copy between
+ * compatible view classes.
+ */
+static void
+iris_resource_copy_region(struct pipe_context *ctx,
+                          struct pipe_resource *dst,
+                          unsigned dst_level,
+                          unsigned dstx, unsigned dsty, unsigned dstz,
+                          struct pipe_resource *src,
+                          unsigned src_level,
+                          const struct pipe_box *src_box)
+{
+   struct iris_context *ice = (void *) ctx;
+   struct iris_batch *batch = &ice->batches[IRIS_BATCH_RENDER];
+
+   /* Use MI_COPY_MEM_MEM for tiny (<= 16 byte, % 4) buffer copies. */
+   if (src->target == PIPE_BUFFER && dst->target == PIPE_BUFFER &&
+       (src_box->width % 4 == 0) && src_box->width <= 16) {
+      struct iris_bo *dst_bo = iris_resource_bo(dst);
+      batch = get_preferred_batch(ice, dst_bo);
+      iris_batch_maybe_flush(batch, 24 + 5 * (src_box->width / 4));
+      iris_emit_pipe_control_flush(batch,
+                                   "stall for MI_COPY_MEM_MEM copy_region",
+                                   PIPE_CONTROL_CS_STALL);
+      ice->vtbl.copy_mem_mem(batch, dst_bo, dstx, iris_resource_bo(src),
+                             src_box->x, src_box->width);
+      return;
+   }
+
+   iris_copy_region(&ice->blorp, batch, dst, dst_level, dstx, dsty, dstz,
+                    src, src_level, src_box);
+
+   /* Copy the separate stencil resource as well when both formats carry
+    * stencil.
+    */
+   if (util_format_is_depth_and_stencil(dst->format) &&
+       util_format_has_stencil(util_format_description(src->format))) {
+      struct iris_resource *junk, *s_src_res, *s_dst_res;
+      iris_get_depth_stencil_resources(src, &junk, &s_src_res);
+      iris_get_depth_stencil_resources(dst, &junk, &s_dst_res);
+
+      iris_copy_region(&ice->blorp, batch, &s_dst_res->base, dst_level, dstx,
+                       dsty, dstz, &s_src_res->base, src_level, src_box);
+   }
+
+   iris_flush_and_dirty_for_history(ice, batch, (struct iris_resource *) dst,
+                                    PIPE_CONTROL_RENDER_TARGET_FLUSH,
+                                    "cache history: post copy_region");
+}
void