* clipping 4 * 2 = 8 > 5 in the src.
*/
+ if (*src_x0 == *src_x1 || *src_y0 == *src_y1
+ || *dst_x0 == *dst_x1 || *dst_y0 == *dst_y1)
+ return true;
+
float scale_x = (float) (*src_x1 - *src_x0) / (*dst_x1 - *dst_x0);
float scale_y = (float) (*src_y1 - *src_y0) / (*dst_y1 - *dst_y0);
clip_coordinates(mirror_y, src_y1, dst_y1, dst_y0,
clip_dst_y1, clip_dst_y0, scale_y, false);
- return false;
+ /* Check for invalid bounds
+ * Can't blit for 0-dimensions
+ */
+ return *src_x0 == *src_x1 || *src_y0 == *src_y1
+ || *dst_x0 == *dst_x1 || *dst_y0 == *dst_y1;
}
+/* Fill out a blorp_surf for the given pipe resource at @level.
+ *
+ * MOCS values come from the per-gen @vtbl->mocs() callback; render targets
+ * get EXEC_OBJECT_WRITE relocation flags.
+ */
void
-iris_blorp_surf_for_resource(struct blorp_surf *surf,
+iris_blorp_surf_for_resource(struct iris_vtable *vtbl,
+ struct blorp_surf *surf,
struct pipe_resource *p_res,
enum isl_aux_usage aux_usage,
+ unsigned level,
bool is_render_target)
{
struct iris_resource *res = (void *) p_res;
+ /* HiZ is tracked per-level; drop to no aux when this level has none. */
+ if (aux_usage == ISL_AUX_USAGE_HIZ &&
+ !iris_resource_level_has_hiz(res, level))
+ aux_usage = ISL_AUX_USAGE_NONE;
+
*surf = (struct blorp_surf) {
.surf = &res->surf,
.addr = (struct blorp_address) {
.buffer = res->bo,
- .offset = 0, // XXX: ???
+ .offset = res->offset,
.reloc_flags = is_render_target ? EXEC_OBJECT_WRITE : 0,
- .mocs = I915_MOCS_CACHED, // XXX: BDW MOCS, PTE MOCS
+ .mocs = vtbl->mocs(res->bo),
},
.aux_usage = aux_usage,
};
- assert(surf->aux_usage == ISL_AUX_USAGE_NONE);
+ /* When an auxiliary surface is in use, wire up its BO/offset and the
+ * clear color (both the CPU-side value and its GPU address).
+ */
+ if (aux_usage != ISL_AUX_USAGE_NONE) {
+ surf->aux_surf = &res->aux.surf;
+ surf->aux_addr = (struct blorp_address) {
+ .buffer = res->aux.bo,
+ .offset = res->aux.offset,
+ .reloc_flags = is_render_target ? EXEC_OBJECT_WRITE : 0,
+ .mocs = vtbl->mocs(res->bo),
+ };
+ surf->clear_color =
+ iris_resource_get_clear_color(res, NULL, NULL);
+ surf->clear_color_addr = (struct blorp_address) {
+ .buffer = res->aux.clear_color_bo,
+ .offset = res->aux.clear_color_offset,
+ .reloc_flags = 0,
+ .mocs = vtbl->mocs(res->aux.clear_color_bo),
+ };
+ }
+
+ // XXX: ASTC
+}
+
+static void
+tex_cache_flush_hack(struct iris_batch *batch)
+{
+ /* The hardware seems to have issues with having a two different
+ * format views of the same texture in the sampler cache at the
+ * same time. It's unclear exactly what the issue is but it hurts
+ * blits and copies particularly badly because they often reinterpret
+ * formats. We badly need better understanding of the sampler issue
+ * and a better fix but this works for now and fixes CTS tests.
+ *
+ * If the BO hasn't been referenced yet this batch, we assume that the
+ * texture cache doesn't contain any relevant data nor need flushing.
+ *
+ * TODO: Remove this hack!
+ */
+ const char *reason =
+ "workaround: WaSamplerCacheFlushBetweenRedescribedSurfaceReads";
+
+ /* CS stall first, then invalidate the texture cache. */
+ iris_emit_pipe_control_flush(batch, reason, PIPE_CONTROL_CS_STALL);
+ iris_emit_pipe_control_flush(batch, reason,
+ PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
}
/**
struct iris_context *ice = (void *) ctx;
struct iris_screen *screen = (struct iris_screen *)ctx->screen;
const struct gen_device_info *devinfo = &screen->devinfo;
+ struct iris_batch *batch = &ice->batches[IRIS_BATCH_RENDER];
enum blorp_batch_flags blorp_flags = 0;
+ struct iris_resource *src_res = (void *) info->src.resource;
+ struct iris_resource *dst_res = (void *) info->dst.resource;
+
+ /* We don't support color masking. */
+ assert((info->mask & PIPE_MASK_RGBA) == PIPE_MASK_RGBA ||
+ (info->mask & PIPE_MASK_RGBA) == 0);
if (info->render_condition_enable) {
if (ice->state.predicate == IRIS_PREDICATE_STATE_DONT_RENDER)
blorp_flags |= BLORP_BATCH_PREDICATE_ENABLE;
}
- struct blorp_surf src_surf, dst_surf;
- iris_blorp_surf_for_resource(&src_surf, info->src.resource,
- ISL_AUX_USAGE_NONE, false);
- iris_blorp_surf_for_resource(&dst_surf, info->dst.resource,
- ISL_AUX_USAGE_NONE, true);
-
struct iris_format_info src_fmt =
iris_format_for_usage(devinfo, info->src.format,
ISL_SURF_USAGE_TEXTURE_BIT);
+ enum isl_aux_usage src_aux_usage =
+ iris_resource_texture_aux_usage(ice, src_res, src_fmt.fmt, 0);
+
+ if (src_aux_usage == ISL_AUX_USAGE_HIZ)
+ src_aux_usage = ISL_AUX_USAGE_NONE;
+
+ bool src_clear_supported = src_aux_usage != ISL_AUX_USAGE_NONE &&
+ src_res->surf.format == src_fmt.fmt;
+
+ iris_resource_prepare_access(ice, batch, src_res, info->src.level, 1,
+ info->src.box.z, info->src.box.depth,
+ src_aux_usage, src_clear_supported);
+
struct iris_format_info dst_fmt =
iris_format_for_usage(devinfo, info->dst.format,
ISL_SURF_USAGE_RENDER_TARGET_BIT);
+ enum isl_aux_usage dst_aux_usage =
+ iris_resource_render_aux_usage(ice, dst_res, dst_fmt.fmt, false, false);
+ bool dst_clear_supported = dst_aux_usage != ISL_AUX_USAGE_NONE;
+
+ struct blorp_surf src_surf, dst_surf;
+ iris_blorp_surf_for_resource(&ice->vtbl, &src_surf, info->src.resource,
+ src_aux_usage, info->src.level, false);
+ iris_blorp_surf_for_resource(&ice->vtbl, &dst_surf, info->dst.resource,
+ dst_aux_usage, info->dst.level, true);
+
+ iris_resource_prepare_access(ice, batch, dst_res, info->dst.level, 1,
+ info->dst.box.z, info->dst.box.depth,
+ dst_aux_usage, dst_clear_supported);
float src_x0 = info->src.box.x;
float src_x1 = info->src.box.x + info->src.box.width;
filter = BLORP_FILTER_NEAREST;
}
- struct iris_batch *batch = &ice->batches[IRIS_BATCH_RENDER];
+ bool format_mismatch = src_fmt.fmt != src_res->surf.format;
+
+ if (format_mismatch && iris_batch_references(batch, src_res->bo))
+ tex_cache_flush_hack(batch);
+
+ if (dst_res->base.target == PIPE_BUFFER)
+ util_range_add(&dst_res->valid_buffer_range, dst_x0, dst_x1);
struct blorp_batch blorp_batch;
blorp_batch_init(&ice->blorp, &blorp_batch, batch, blorp_flags);
- for (int slice = 0; slice < info->dst.box.depth; slice++) {
- iris_batch_maybe_flush(batch, 1500);
+ unsigned main_mask;
+ if (util_format_is_depth_or_stencil(info->dst.format))
+ main_mask = PIPE_MASK_Z;
+ else
+ main_mask = PIPE_MASK_RGBA;
- blorp_blit(&blorp_batch,
- &src_surf, info->src.level, info->src.box.z + slice,
- src_fmt.fmt, src_fmt.swizzle,
- &dst_surf, info->dst.level, info->dst.box.z + slice,
- dst_fmt.fmt, ISL_SWIZZLE_IDENTITY,
- src_x0, src_y0, src_x1, src_y1,
- dst_x0, dst_y0, dst_x1, dst_y1,
- filter, mirror_x, mirror_y);
+ if (info->mask & main_mask) {
+ for (int slice = 0; slice < info->dst.box.depth; slice++) {
+ iris_batch_maybe_flush(batch, 1500);
+
+ blorp_blit(&blorp_batch,
+ &src_surf, info->src.level, info->src.box.z + slice,
+ src_fmt.fmt, src_fmt.swizzle,
+ &dst_surf, info->dst.level, info->dst.box.z + slice,
+ dst_fmt.fmt, dst_fmt.swizzle,
+ src_x0, src_y0, src_x1, src_y1,
+ dst_x0, dst_y0, dst_x1, dst_y1,
+ filter, mirror_x, mirror_y);
+ }
}
- if (util_format_is_depth_and_stencil(info->dst.format) &&
+ if ((info->mask & PIPE_MASK_S) &&
+ util_format_has_stencil(util_format_description(info->dst.format)) &&
util_format_has_stencil(util_format_description(info->src.format))) {
struct iris_resource *src_res, *dst_res, *junk;
iris_get_depth_stencil_resources(info->src.resource, &junk, &src_res);
iris_get_depth_stencil_resources(info->dst.resource, &junk, &dst_res);
- iris_blorp_surf_for_resource(&src_surf, &src_res->base,
- ISL_AUX_USAGE_NONE, false);
- iris_blorp_surf_for_resource(&dst_surf, &dst_res->base,
- ISL_AUX_USAGE_NONE, true);
+ iris_blorp_surf_for_resource(&ice->vtbl, &src_surf, &src_res->base,
+ ISL_AUX_USAGE_NONE, info->src.level, false);
+ iris_blorp_surf_for_resource(&ice->vtbl, &dst_surf, &dst_res->base,
+ ISL_AUX_USAGE_NONE, info->dst.level, true);
for (int slice = 0; slice < info->dst.box.depth; slice++) {
iris_batch_maybe_flush(batch, 1500);
blorp_batch_finish(&blorp_batch);
+ if (format_mismatch)
+ tex_cache_flush_hack(batch);
+
+ iris_resource_finish_write(ice, dst_res, info->dst.level, info->dst.box.z,
+ info->dst.box.depth, dst_aux_usage);
+
iris_flush_and_dirty_for_history(ice, batch, (struct iris_resource *)
- info->dst.resource);
+ info->dst.resource,
+ PIPE_CONTROL_RENDER_TARGET_FLUSH,
+ "cache history: post-blit");
+}
+
+/* Determine the aux usage and whether fast-clear colors can be handled
+ * when copying @res with blorp_copy().
+ *
+ * MCS and CCS_E are passed through (the copy can operate on compressed
+ * data); every other aux usage falls back to no auxiliary surface.
+ */
+static void
+get_copy_region_aux_settings(const struct gen_device_info *devinfo,
+ struct iris_resource *res,
+ enum isl_aux_usage *out_aux_usage,
+ bool *out_clear_supported)
+{
+ switch (res->aux.usage) {
+ case ISL_AUX_USAGE_MCS:
+ case ISL_AUX_USAGE_CCS_E:
+ *out_aux_usage = res->aux.usage;
+ /* Prior to Gen9, fast-clear only supported 0/1 clear colors. Since
+ * we're going to re-interpret the format as an integer format possibly
+ * with a different number of components, we can't handle clear colors
+ * until Gen9.
+ */
+ *out_clear_supported = devinfo->gen >= 9;
+ break;
+ default:
+ *out_aux_usage = ISL_AUX_USAGE_NONE;
+ *out_clear_supported = false;
+ break;
+ }
}
/**
- * The pipe->resource_copy_region() driver hook.
+ * Perform a GPU-based raw memory copy between compatible view classes.
*
- * This implements ARB_copy_image semantics - a raw memory copy between
- * compatible view classes.
+ * Does not perform any flushing - the new data may still be left in the
+ * render cache, and old data may remain in other caches.
+ *
+ * Wraps blorp_copy() and blorp_buffer_copy().
*/
-static void
-iris_resource_copy_region(struct pipe_context *ctx,
- struct pipe_resource *dst,
- unsigned dst_level,
- unsigned dstx, unsigned dsty, unsigned dstz,
- struct pipe_resource *src,
- unsigned src_level,
- const struct pipe_box *src_box)
+void
+iris_copy_region(struct blorp_context *blorp,
+ struct iris_batch *batch,
+ struct pipe_resource *dst,
+ unsigned dst_level,
+ unsigned dstx, unsigned dsty, unsigned dstz,
+ struct pipe_resource *src,
+ unsigned src_level,
+ const struct pipe_box *src_box)
{
struct blorp_batch blorp_batch;
- struct iris_context *ice = (void *) ctx;
- struct iris_batch *batch = &ice->batches[IRIS_BATCH_RENDER];
+ struct iris_context *ice = blorp->driver_ctx;
+ struct iris_screen *screen = (void *) ice->ctx.screen;
+ const struct gen_device_info *devinfo = &screen->devinfo;
+ struct iris_resource *src_res = (void *) src;
+ struct iris_resource *dst_res = (void *) dst;
- iris_batch_maybe_flush(batch, 1500);
+ enum isl_aux_usage src_aux_usage, dst_aux_usage;
+ bool src_clear_supported, dst_clear_supported;
+ get_copy_region_aux_settings(devinfo, src_res, &src_aux_usage,
+ &src_clear_supported);
+ get_copy_region_aux_settings(devinfo, dst_res, &dst_aux_usage,
+ &dst_clear_supported);
- blorp_batch_init(&ice->blorp, &blorp_batch, batch, 0);
+ /* If src was already referenced in this batch its data may sit in the
+ * sampler cache under a different view; see tex_cache_flush_hack().
+ */
+ if (iris_batch_references(batch, src_res->bo))
+ tex_cache_flush_hack(batch);
+
+ /* Record the destination range as now containing valid data. */
+ if (dst->target == PIPE_BUFFER)
+ util_range_add(&dst_res->valid_buffer_range, dstx, dstx + src_box->width);
if (dst->target == PIPE_BUFFER && src->target == PIPE_BUFFER) {
struct blorp_address src_addr = {
};
struct blorp_address dst_addr = {
.buffer = iris_resource_bo(dst), .offset = dstx,
+ .reloc_flags = EXEC_OBJECT_WRITE,
};
+ iris_batch_maybe_flush(batch, 1500);
+
+ blorp_batch_init(&ice->blorp, &blorp_batch, batch, 0);
blorp_buffer_copy(&blorp_batch, src_addr, dst_addr, src_box->width);
+ blorp_batch_finish(&blorp_batch);
} else {
// XXX: what about one surface being a buffer and not the other?
struct blorp_surf src_surf, dst_surf;
- iris_blorp_surf_for_resource(&src_surf, src, ISL_AUX_USAGE_NONE, false);
- iris_blorp_surf_for_resource(&dst_surf, dst, ISL_AUX_USAGE_NONE, true);
+ iris_blorp_surf_for_resource(&ice->vtbl, &src_surf, src, src_aux_usage,
+ src_level, false);
+ iris_blorp_surf_for_resource(&ice->vtbl, &dst_surf, dst, dst_aux_usage,
+ dst_level, true);
+
+ /* Resolve/prepare the aux state of both resources before copying. */
+ iris_resource_prepare_access(ice, batch, src_res, src_level, 1,
+ src_box->z, src_box->depth,
+ src_aux_usage, src_clear_supported);
+ iris_resource_prepare_access(ice, batch, dst_res, dst_level, 1,
+ dstz, src_box->depth,
+ dst_aux_usage, dst_clear_supported);
- // XXX: ???
- unsigned dst_layer = dstz;
- unsigned src_layer = src_box->z;
+ blorp_batch_init(&ice->blorp, &blorp_batch, batch, 0);
- assert(src_box->depth == 1);
+ /* Copy one slice at a time so each can flush the batch if needed. */
+ for (int slice = 0; slice < src_box->depth; slice++) {
+ iris_batch_maybe_flush(batch, 1500);
+
+ blorp_copy(&blorp_batch, &src_surf, src_level, src_box->z + slice,
+ &dst_surf, dst_level, dstz + slice,
+ src_box->x, src_box->y, dstx, dsty,
+ src_box->width, src_box->height);
+ }
+ blorp_batch_finish(&blorp_batch);
- blorp_copy(&blorp_batch, &src_surf, src_level, src_layer,
- &dst_surf, dst_level, dst_layer,
- src_box->x, src_box->y, dstx, dsty,
- src_box->width, src_box->height);
+ iris_resource_finish_write(ice, dst_res, dst_level, dstz,
+ src_box->depth, dst_aux_usage);
}
- blorp_batch_finish(&blorp_batch);
+ /* The copy may have redescribed surface formats; flush again (see
+ * tex_cache_flush_hack()).
+ */
+ tex_cache_flush_hack(batch);
+}
+
+/* Return the batch that operations touching @bo should be queued on. */
+static struct iris_batch *
+get_preferred_batch(struct iris_context *ice, struct iris_bo *bo)
+{
+ /* If the compute batch is already using this buffer, we'd prefer to
+ * continue queueing in the compute batch.
+ */
+ if (iris_batch_references(&ice->batches[IRIS_BATCH_COMPUTE], bo))
+ return &ice->batches[IRIS_BATCH_COMPUTE];
+
+ /* Otherwise default to the render batch. */
+ return &ice->batches[IRIS_BATCH_RENDER];
+}
+
+
+/**
+ * The pipe->resource_copy_region() driver hook.
+ *
+ * This implements ARB_copy_image semantics - a raw memory copy between
+ * compatible view classes.
+ */
+static void
+iris_resource_copy_region(struct pipe_context *ctx,
+ struct pipe_resource *dst,
+ unsigned dst_level,
+ unsigned dstx, unsigned dsty, unsigned dstz,
+ struct pipe_resource *src,
+ unsigned src_level,
+ const struct pipe_box *src_box)
+{
+ struct iris_context *ice = (void *) ctx;
+ struct iris_batch *batch = &ice->batches[IRIS_BATCH_RENDER];
+
+ /* Use MI_COPY_MEM_MEM for tiny (<= 16 byte, % 4) buffer copies. */
+ if (src->target == PIPE_BUFFER && dst->target == PIPE_BUFFER &&
+ (src_box->width % 4 == 0) && src_box->width <= 16) {
+ struct iris_bo *dst_bo = iris_resource_bo(dst);
+ batch = get_preferred_batch(ice, dst_bo);
+ iris_batch_maybe_flush(batch, 24 + 5 * (src_box->width / 4));
+ iris_emit_pipe_control_flush(batch,
+ "stall for MI_COPY_MEM_MEM copy_region",
+ PIPE_CONTROL_CS_STALL);
+ ice->vtbl.copy_mem_mem(batch, dst_bo, dstx, iris_resource_bo(src),
+ src_box->x, src_box->width);
+ return;
+ }
+
+ iris_copy_region(&ice->blorp, batch, dst, dst_level, dstx, dsty, dstz,
+ src, src_level, src_box);
+
+ /* When both formats carry stencil, copy the separate stencil resource
+ * as well.
+ */
+ if (util_format_is_depth_and_stencil(dst->format) &&
+ util_format_has_stencil(util_format_description(src->format))) {
+ struct iris_resource *junk, *s_src_res, *s_dst_res;
+ iris_get_depth_stencil_resources(src, &junk, &s_src_res);
+ iris_get_depth_stencil_resources(dst, &junk, &s_dst_res);
+
+ iris_copy_region(&ice->blorp, batch, &s_dst_res->base, dst_level, dstx,
+ dsty, dstz, &s_src_res->base, src_level, src_box);
+ }
- iris_flush_and_dirty_for_history(ice, batch, (struct iris_resource *) dst);
+ iris_flush_and_dirty_for_history(ice, batch, (struct iris_resource *) dst,
+ PIPE_CONTROL_RENDER_TARGET_FLUSH,
+ "cache history: post copy_region");
}
void