blorp_surface_reloc(struct blorp_batch *batch, uint32_t ss_offset,
                    struct blorp_address address, uint32_t delta);
+#if GEN_GEN >= 7
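+/* Implemented by the driver, like the other hooks declared above; returns
+ * the address that surface state offsets are relative to.
+ */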
+static struct blorp_address
+blorp_get_surface_base_address(struct blorp_batch *batch);
+#endif
+
static void
blorp_emit_urb_config(struct blorp_batch *batch,
                      unsigned vs_entry_size, unsigned sf_entry_size);
#endif /* GEN_GEN >= 6 */
+#if GEN_GEN >= 7 && GEN_GEN <= 10
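+/* Copies `size` bytes (a multiple of four) from src to dst through the
+ * command streamer, one dword at a time.
+ */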
+static void
+blorp_emit_memcpy(struct blorp_batch *batch,
+                  struct blorp_address dst,
+                  struct blorp_address src,
+                  uint32_t size)
+{
+   assert(size % 4 == 0);
+
+   for (unsigned dw = 0; dw < size; dw += 4) {
+#if GEN_GEN >= 8
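+      /* On Gen8 and later we can use MI_COPY_MEM_MEM to copy a dword
+       * directly from one memory location to another.
+       */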
+      blorp_emit(batch, GENX(MI_COPY_MEM_MEM), cp) {
+         cp.DestinationMemoryAddress = dst;
+         cp.SourceMemoryAddress = src;
+      }
+#else
+      /* IVB does not have a general purpose register for command streamer
+       * commands. Therefore, we use an alternate temporary register.
+       */
+#define BLORP_TEMP_REG 0x2440 /* GEN7_3DPRIM_BASE_VERTEX */
+      blorp_emit(batch, GENX(MI_LOAD_REGISTER_MEM), load) {
+         load.RegisterAddress = BLORP_TEMP_REG;
+         load.MemoryAddress = src;
+      }
+      blorp_emit(batch, GENX(MI_STORE_REGISTER_MEM), store) {
+         store.RegisterAddress = BLORP_TEMP_REG;
+         store.MemoryAddress = dst;
+      }
+#undef BLORP_TEMP_REG
+#endif
+      dst.offset += 4;
+      src.offset += 4;
+   }
+}
+#endif
+
static void
blorp_emit_surface_state(struct blorp_batch *batch,
                         const struct brw_blorp_surface_info *surface,
   }
   blorp_flush_range(batch, state, GENX(RENDER_SURFACE_STATE_length) * 4);
+
+   if (surface->clear_color_addr.buffer) {
+#if GEN_GEN > 10
+      unreachable("Implement indirect clear support on gen11+");
+#elif GEN_GEN >= 7 && GEN_GEN <= 10
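+      /* On these gens the clear color lives inline in the surface state, so
+       * copy the current value from the indirect buffer into the state we
+       * just wrote out.
+       */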
+      struct blorp_address dst_addr = blorp_get_surface_base_address(batch);
+      dst_addr.offset += state_offset + isl_dev->ss.clear_value_offset;
+      blorp_emit_memcpy(batch, dst_addr, surface->clear_color_addr,
+                        isl_dev->ss.clear_value_size);
+#else
+      unreachable("Fast clears are only supported on gen7+");
+#endif
+   }
}
static void
   uint32_t bind_offset, surface_offsets[2];
   void *surface_maps[2];
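+   /* Set if any surface state emitted below takes its clear color from an
+    * indirect buffer; used to decide whether the state cache must be
+    * invalidated afterwards.
+    */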
+   MAYBE_UNUSED bool has_indirect_clear_color = false;
   if (params->use_pre_baked_binding_table) {
      bind_offset = params->pre_baked_binding_table_offset;
   } else {
                                  surface_maps[BLORP_RENDERBUFFER_BT_INDEX],
                                  surface_offsets[BLORP_RENDERBUFFER_BT_INDEX],
                                  params->color_write_disable, true);
+         if (params->dst.clear_color_addr.buffer != NULL)
+            has_indirect_clear_color = true;
      } else {
         assert(params->depth.enabled || params->stencil.enabled);
         const struct brw_blorp_surface_info *surface =
                                  surface_maps[BLORP_TEXTURE_BT_INDEX],
                                  surface_offsets[BLORP_TEXTURE_BT_INDEX],
                                  NULL, false);
+         if (params->src.clear_color_addr.buffer != NULL)
+            has_indirect_clear_color = true;
      }
   }
+#if GEN_GEN >= 7 && GEN_GEN <= 10
+   if (has_indirect_clear_color) {
+      /* Updating a surface state object may require that the state cache be
+       * invalidated. From the SKL PRM, Shared Functions -> State -> State
+       * Caching:
+       *
+       *    Whenever the RENDER_SURFACE_STATE object in memory pointed to by
+       *    the Binding Table Pointer (BTP) and Binding Table Index (BTI) is
+       *    modified [...], the L1 state cache must be invalidated to ensure
+       *    the new surface or sampler state is fetched from system memory.
+       */
+      blorp_emit(batch, GENX(PIPE_CONTROL), pipe) {
+         pipe.StateCacheInvalidationEnable = true;
+      }
+   }
+#endif
+
#if GEN_GEN >= 7
   blorp_emit(batch, GENX(3DSTATE_BINDING_TABLE_POINTERS_VS), bt);
   blorp_emit(batch, GENX(3DSTATE_BINDING_TABLE_POINTERS_HS), bt);