* PIPE_CONTROL_* bits), and it will take care of splitting it into multiple
* PIPE_CONTROL commands as necessary. The per-generation workarounds are
* applied in iris_emit_raw_pipe_control() in iris_state.c.
- *
- * This file also contains our cache tracking helpers. We have sets for
- * the render cache, depth cache, and so on. If a BO is in the set, then
- * it may have data in that cache. These take care of emitting flushes for
- * render-to-texture, format reinterpretation issues, and other situations.
*/
#include "iris_context.h"
* given generation.
*/
void
-iris_emit_pipe_control_flush(struct iris_batch *batch, uint32_t flags)
+iris_emit_pipe_control_flush(struct iris_batch *batch,
+ const char *reason,
+ uint32_t flags)
{
if ((flags & PIPE_CONTROL_CACHE_FLUSH_BITS) &&
(flags & PIPE_CONTROL_CACHE_INVALIDATE_BITS)) {
* with any write cache flush, so this shouldn't be a concern. In order
* to ensure a full stall, we do an end-of-pipe sync.
*/
- iris_emit_end_of_pipe_sync(batch, flags & PIPE_CONTROL_CACHE_FLUSH_BITS);
+ iris_emit_end_of_pipe_sync(batch, reason,
+ flags & PIPE_CONTROL_CACHE_FLUSH_BITS);
flags &= ~(PIPE_CONTROL_CACHE_FLUSH_BITS | PIPE_CONTROL_CS_STALL);
}
- batch->vtbl->emit_raw_pipe_control(batch, flags, NULL, 0, 0);
+ batch->screen->vtbl.emit_raw_pipe_control(batch, reason, flags, NULL, 0, 0);
}
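+/* A sketch of a typical call site (hypothetical, not part of this change):
+ * the reason string rides along with the flush bits so that debugging
+ * output can attribute every PIPE_CONTROL to the code that requested it,
+ * e.g.:
+ *
+ *    iris_emit_pipe_control_flush(batch, "API: glFlush",
+ *                                 PIPE_CONTROL_RENDER_TARGET_FLUSH |
+ *                                 PIPE_CONTROL_CS_STALL);
+ */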
/**
* - PIPE_CONTROL_WRITE_DEPTH_COUNT
*/
void
-iris_emit_pipe_control_write(struct iris_batch *batch, uint32_t flags,
+iris_emit_pipe_control_write(struct iris_batch *batch,
+ const char *reason, uint32_t flags,
struct iris_bo *bo, uint32_t offset,
uint64_t imm)
{
- batch->vtbl->emit_raw_pipe_control(batch, flags, bo, offset, imm);
+ batch->screen->vtbl.emit_raw_pipe_control(batch, reason, flags,
+                                           bo, offset, imm);
}
/*
* Data" in the PIPE_CONTROL command.
*/
void
-iris_emit_end_of_pipe_sync(struct iris_batch *batch, uint32_t flags)
+iris_emit_end_of_pipe_sync(struct iris_batch *batch,
+ const char *reason, uint32_t flags)
{
/* From Sandybridge PRM, volume 2, "1.7.3.1 Writing a Value to Memory":
*
* Data, Required Write Cache Flush bits set)
* - Workload-2 (Can use the data produced or output by Workload-1)
*/
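+ /* The write emitted below is a dummy: the immediate value is never read
+ * back; it exists only to give this PIPE_CONTROL the end-of-pipe write
+ * semantics described above, using the screen-wide workaround BO as a
+ * scratch destination.
+ */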
- iris_emit_pipe_control_write(batch, flags | PIPE_CONTROL_CS_STALL |
+ iris_emit_pipe_control_write(batch, reason,
+ flags | PIPE_CONTROL_CS_STALL |
PIPE_CONTROL_WRITE_IMMEDIATE,
- batch->screen->workaround_bo, 0, 0);
-}
-
-void
-iris_cache_sets_clear(struct iris_batch *batch)
-{
- struct hash_entry *render_entry;
- hash_table_foreach(batch->cache.render, render_entry)
- _mesa_hash_table_remove(batch->cache.render, render_entry);
-
- struct set_entry *depth_entry;
- set_foreach(batch->cache.depth, depth_entry)
- _mesa_set_remove(batch->cache.depth, depth_entry);
+ batch->screen->workaround_address.bo,
+ batch->screen->workaround_address.offset, 0);
}
/**
- * Emits an appropriate flush for a BO if it has been rendered to within the
- * same batchbuffer as a read that's about to be emitted.
+ * Emits appropriate flushes and invalidations for any previous memory
+ * operations on \p bo to be strictly ordered relative to any subsequent
+ * memory operations performed from the caching domain \p access.
+ *
+ * This is useful because the GPU has separate incoherent caches for the
+ * render target, sampler, etc., which need to be explicitly invalidated or
+ * flushed in order to obtain the expected memory ordering in cases where the
+ * same surface is accessed through multiple caches (e.g. due to
+ * render-to-texture).
*
- * The GPU has separate, incoherent caches for the render cache and the
- * sampler cache, along with other caches. Usually data in the different
- * caches don't interact (e.g. we don't render to our driver-generated
- * immediate constant data), but for render-to-texture in FBOs we definitely
- * do. When a batchbuffer is flushed, the kernel will ensure that everything
- * necessary is flushed before another use of that BO, but for reuse from
- * different caches within a batchbuffer, it's all our responsibility.
+ * This provides the expected memory ordering guarantees whether the
+ * previous access was performed from the same batch or from a different
+ * one, but only the former case needs to be handled explicitly here, since
+ * the kernel already inserts implicit flushes and synchronization in order
+ * to guarantee that any data dependencies between batches are satisfied.
+ *
+ * Even though neither flushing nor invalidation is required to account
+ * for concurrent updates from other batches, we provide the guarantee that a
+ * required synchronization operation due to a previous batch-local update
+ * will never be omitted due to the influence of another thread accessing the
+ * same buffer concurrently from the same caching domain: Such a concurrent
+ * update will only ever change the seqno of the last update to a value
+ * greater than the local value (see iris_bo_bump_seqno()), which means that
+ * we will always emit at least as much flushing and invalidation as we would
+ * have for the local seqno (see the coherent_seqnos comparisons below).
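+ *
+ * As a worked example (hypothetical seqno values): suppose the last
+ * render-target write to \p bo has seqno 42, the most recent update from
+ * that domain known to be visible to \p access has seqno 40
+ * (coherent_seqnos[access][i]), and the render cache was last flushed at
+ * seqno 41 (coherent_seqnos[i][i]).  Since 42 > 40 the write is not yet
+ * visible to \p access, so we invalidate; and since 42 > 41 it also
+ * postdates the last flush, so the render cache is flushed first.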
*/
-static void
-flush_depth_and_render_caches(struct iris_batch *batch)
+void
+iris_emit_buffer_barrier_for(struct iris_batch *batch,
+ struct iris_bo *bo,
+ enum iris_domain access)
{
- iris_emit_pipe_control_flush(batch,
- PIPE_CONTROL_DEPTH_CACHE_FLUSH |
- PIPE_CONTROL_RENDER_TARGET_FLUSH |
- PIPE_CONTROL_CS_STALL);
+ const uint32_t all_flush_bits = (PIPE_CONTROL_CACHE_FLUSH_BITS |
+ PIPE_CONTROL_STALL_AT_SCOREBOARD |
+ PIPE_CONTROL_FLUSH_ENABLE);
+ const uint32_t flush_bits[NUM_IRIS_DOMAINS] = {
+ [IRIS_DOMAIN_RENDER_WRITE] = PIPE_CONTROL_RENDER_TARGET_FLUSH,
+ [IRIS_DOMAIN_DEPTH_WRITE] = PIPE_CONTROL_DEPTH_CACHE_FLUSH,
+ [IRIS_DOMAIN_OTHER_WRITE] = PIPE_CONTROL_FLUSH_ENABLE,
+ [IRIS_DOMAIN_OTHER_READ] = PIPE_CONTROL_STALL_AT_SCOREBOARD,
+ };
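+ /* Note that the "flush" for the read-only domain above is a stall
+ * rather than a cache flush: reads leave no dirty data behind, so a
+ * write-after-read hazard only requires waiting for outstanding reads
+ * to complete before the new write proceeds.
+ */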
+ const uint32_t invalidate_bits[NUM_IRIS_DOMAINS] = {
+ [IRIS_DOMAIN_RENDER_WRITE] = PIPE_CONTROL_RENDER_TARGET_FLUSH,
+ [IRIS_DOMAIN_DEPTH_WRITE] = PIPE_CONTROL_DEPTH_CACHE_FLUSH,
+ [IRIS_DOMAIN_OTHER_WRITE] = PIPE_CONTROL_FLUSH_ENABLE,
+ [IRIS_DOMAIN_OTHER_READ] = (PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
+ PIPE_CONTROL_CONST_CACHE_INVALIDATE),
+ };
+ uint32_t bits = 0;
- iris_emit_pipe_control_flush(batch,
- PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
- PIPE_CONTROL_CONST_CACHE_INVALIDATE);
+ /* Iterate over all read/write domains first in order to handle
+ * read-after-write (RaW) and write-after-write (WaW) dependencies,
+ * which might involve flushing the domain of the previous access
+ * and invalidating the specified domain.
+ */
+ for (unsigned i = 0; i < IRIS_DOMAIN_OTHER_WRITE; i++) {
+ assert(!iris_domain_is_read_only(i));
+ if (i != access) {
+ const uint64_t seqno = READ_ONCE(bo->last_seqnos[i]);
- iris_cache_sets_clear(batch);
-}
+ /* Invalidate unless the most recent read/write access from
+ * this domain is already guaranteed to be visible to the
+ * specified domain. Flush if the most recent access from
+ * this domain occurred after its most recent flush.
+ */
+ if (seqno > batch->coherent_seqnos[access][i]) {
+ bits |= invalidate_bits[access];
-void
-iris_cache_flush_for_read(struct iris_batch *batch,
- struct iris_bo *bo)
-{
- if (_mesa_hash_table_search(batch->cache.render, bo) ||
- _mesa_set_search(batch->cache.depth, bo))
- flush_depth_and_render_caches(batch);
-}
+ if (seqno > batch->coherent_seqnos[i][i])
+ bits |= flush_bits[i];
+ }
+ }
+ }
-static void *
-format_aux_tuple(enum isl_format format, enum isl_aux_usage aux_usage)
-{
- return (void *)(uintptr_t)((uint32_t)format << 8 | aux_usage);
-}
+ /* All read-only domains can be considered mutually coherent, since
+ * the order of read-only memory operations is immaterial.  If the
+ * specified domain is read/write, we still need to iterate over the
+ * read-only domains in order to handle any write-after-read (WaR)
+ * dependencies.
+ */
+ if (!iris_domain_is_read_only(access)) {
+ for (unsigned i = IRIS_DOMAIN_OTHER_READ; i < NUM_IRIS_DOMAINS; i++) {
+ assert(iris_domain_is_read_only(i));
+ const uint64_t seqno = READ_ONCE(bo->last_seqnos[i]);
-void
-iris_cache_flush_for_render(struct iris_batch *batch,
- struct iris_bo *bo,
- enum isl_format format,
- enum isl_aux_usage aux_usage)
-{
- if (_mesa_set_search(batch->cache.depth, bo))
- flush_depth_and_render_caches(batch);
+ /* Flush if the most recent access from this domain occurred
+ * after its most recent flush.
+ */
+ if (seqno > batch->coherent_seqnos[i][i])
+ bits |= flush_bits[i];
+ }
+ }
- /* Check to see if this bo has been used by a previous rendering operation
- * but with a different format or aux usage. If it has, flush the render
- * cache so we ensure that it's only in there with one format or aux usage
- * at a time.
- *
- * Even though it's not obvious, this can easily happen in practice.
- * Suppose a client is blending on a surface with sRGB encode enabled on
- * gen9. This implies that you get AUX_USAGE_CCS_D at best. If the client
- * then disables sRGB decode and continues blending we will flip on
- * AUX_USAGE_CCS_E without doing any sort of resolve in-between (this is
- * perfectly valid since CCS_E is a subset of CCS_D). However, this means
- * that we have fragments in-flight which are rendering with UNORM+CCS_E
- * and other fragments in-flight with SRGB+CCS_D on the same surface at the
- * same time and the pixel scoreboard and color blender are trying to sort
- * it all out. This ends badly (i.e. GPU hangs).
- *
- * To date, we have never observed GPU hangs or even corruption to be
- * associated with switching the format, only the aux usage. However,
- * there are comments in various docs which indicate that the render cache
- * isn't 100% resilient to format changes. We may as well be conservative
- * and flush on format changes too. We can always relax this later if we
- * find it to be a performance problem.
+ /* The IRIS_DOMAIN_OTHER_WRITE kitchen-sink domain cannot be
+ * considered coherent with itself since it's really a collection
+ * of multiple incoherent read/write domains, so we special-case it
+ * here.
*/
- struct hash_entry *entry = _mesa_hash_table_search(batch->cache.render, bo);
- if (entry && entry->data != format_aux_tuple(format, aux_usage))
- flush_depth_and_render_caches(batch);
-}
+ const unsigned i = IRIS_DOMAIN_OTHER_WRITE;
+ const uint64_t seqno = READ_ONCE(bo->last_seqnos[i]);
-void
-iris_render_cache_add_bo(struct iris_batch *batch,
- struct iris_bo *bo,
- enum isl_format format,
- enum isl_aux_usage aux_usage)
-{
-#ifndef NDEBUG
- struct hash_entry *entry = _mesa_hash_table_search(batch->cache.render, bo);
- if (entry) {
- /* Otherwise, someone didn't do a flush_for_render and that would be
- * very bad indeed.
- */
- assert(entry->data == format_aux_tuple(format, aux_usage));
+ /* Invalidate unless the most recent read/write access from this
+ * domain is already guaranteed to be visible to the specified
+ * domain. Flush if the most recent access from this domain
+ * occurred after its most recent flush.
+ */
+ if (seqno > batch->coherent_seqnos[access][i]) {
+ bits |= invalidate_bits[access];
+
+ if (seqno > batch->coherent_seqnos[i][i])
+ bits |= flush_bits[i];
}
-#endif
- _mesa_hash_table_insert(batch->cache.render, bo,
- format_aux_tuple(format, aux_usage));
-}
+ if (bits) {
+ /* Stall-at-scoreboard is not expected to work in combination with other
+ * flush bits.
+ */
+ if (bits & PIPE_CONTROL_CACHE_FLUSH_BITS)
+ bits &= ~PIPE_CONTROL_STALL_AT_SCOREBOARD;
-void
-iris_cache_flush_for_depth(struct iris_batch *batch,
- struct iris_bo *bo)
-{
- if (_mesa_hash_table_search(batch->cache.render, bo))
- flush_depth_and_render_caches(batch);
+ /* Emit any required flushes and invalidations. */
+ if (bits & all_flush_bits)
+ iris_emit_end_of_pipe_sync(batch, "cache tracker: flush",
+ bits & all_flush_bits);
+
+ if (bits & ~all_flush_bits)
+ iris_emit_pipe_control_flush(batch, "cache tracker: invalidate",
+ bits & ~all_flush_bits);
+ }
}
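+/* Usage sketch (hypothetical call sites, for illustration): a caller about
+ * to sample from a buffer object would emit
+ *
+ *    iris_emit_buffer_barrier_for(batch, bo, IRIS_DOMAIN_OTHER_READ);
+ *
+ * while one about to bind the same BO as a render target would emit
+ *
+ *    iris_emit_buffer_barrier_for(batch, bo, IRIS_DOMAIN_RENDER_WRITE);
+ *
+ * The seqno comparisons above reduce each call to the minimum set of
+ * flushes and invalidations actually required, often none at all.
+ */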
+/**
+ * Flush and invalidate all caches (for debugging purposes).
+ */
void
-iris_depth_cache_add_bo(struct iris_batch *batch, struct iris_bo *bo)
+iris_flush_all_caches(struct iris_batch *batch)
{
- _mesa_set_add(batch->cache.depth, bo);
+ iris_emit_pipe_control_flush(batch, "debug: flush all caches",
+ PIPE_CONTROL_CS_STALL |
+ PIPE_CONTROL_DATA_CACHE_FLUSH |
+ PIPE_CONTROL_DEPTH_CACHE_FLUSH |
+ PIPE_CONTROL_RENDER_TARGET_FLUSH |
+ PIPE_CONTROL_VF_CACHE_INVALIDATE |
+ PIPE_CONTROL_INSTRUCTION_INVALIDATE |
+ PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
+ PIPE_CONTROL_CONST_CACHE_INVALIDATE |
+ PIPE_CONTROL_STATE_CACHE_INVALIDATE);
}
static void
iris_texture_barrier(struct pipe_context *ctx, unsigned flags)
{
struct iris_context *ice = (void *) ctx;
+ struct iris_batch *render_batch = &ice->batches[IRIS_BATCH_RENDER];
+ struct iris_batch *compute_batch = &ice->batches[IRIS_BATCH_COMPUTE];
- // XXX: compute batch?
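+ /* Each barrier below is deliberately split across two PIPE_CONTROLs:
+ * the first stalls and flushes any dirty caches, and only then does the
+ * second invalidate the texture cache, since flush and invalidate bits
+ * are not safe to combine in a single packet (see the top of
+ * iris_emit_pipe_control_flush).
+ */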
+ if (render_batch->contains_draw) {
+ iris_batch_maybe_flush(render_batch, 48);
+ iris_emit_pipe_control_flush(render_batch,
+ "API: texture barrier (1/2)",
+ PIPE_CONTROL_DEPTH_CACHE_FLUSH |
+ PIPE_CONTROL_RENDER_TARGET_FLUSH |
+ PIPE_CONTROL_CS_STALL);
+ iris_emit_pipe_control_flush(render_batch,
+ "API: texture barrier (2/2)",
+ PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
+ }
- flush_depth_and_render_caches(&ice->render_batch);
+ if (compute_batch->contains_draw) {
+ iris_batch_maybe_flush(compute_batch, 48);
+ iris_emit_pipe_control_flush(compute_batch,
+ "API: texture barrier (1/2)",
+ PIPE_CONTROL_CS_STALL);
+ iris_emit_pipe_control_flush(compute_batch,
+ "API: texture barrier (2/2)",
+ PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
+ }
}
static void
PIPE_CONTROL_CONST_CACHE_INVALIDATE;
}
- if (flags & PIPE_BARRIER_TEXTURE) {
- bits |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
- }
-
- if (flags & PIPE_BARRIER_FRAMEBUFFER) {
+ if (flags & (PIPE_BARRIER_TEXTURE | PIPE_BARRIER_FRAMEBUFFER)) {
bits |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
PIPE_CONTROL_RENDER_TARGET_FLUSH;
}
- // XXX: MAPPED_BUFFER, QUERY_BUFFER, STREAMOUT_BUFFER, GLOBAL_BUFFER?
- // XXX: compute batch?
-
- iris_emit_pipe_control_flush(&ice->render_batch, bits);
+ for (int i = 0; i < IRIS_BATCH_COUNT; i++) {
+ if (ice->batches[i].contains_draw) {
+ iris_batch_maybe_flush(&ice->batches[i], 24);
+ iris_emit_pipe_control_flush(&ice->batches[i], "API: memory barrier",
+ bits);
+ }
+ }
}
void