* PIPE_CONTROL_* bits), and it will take care of splitting it into multiple
* PIPE_CONTROL commands as necessary. The per-generation workarounds are
* applied in iris_emit_raw_pipe_control() in iris_state.c.
- *
- * This file also contains our cache tracking helpers. We have sets for
- * the render cache, depth cache, and so on. If a BO is in the set, then
- * it may have data in that cache. These take care of emitting flushes for
- * render-to-texture, format reinterpretation issues, and other situations.
*/
#include "iris_context.h"
                                batch->screen->workaround_bo, 0, 0);
}
-void
-iris_cache_sets_clear(struct iris_batch *batch)
-{
-   struct hash_entry *render_entry;
-   hash_table_foreach(batch->cache.render, render_entry)
-      _mesa_hash_table_remove(batch->cache.render, render_entry);
-
-   struct set_entry *depth_entry;
-   set_foreach(batch->cache.depth, depth_entry)
-      _mesa_set_remove(batch->cache.depth, depth_entry);
-}
-
-/**
- * Emits an appropriate flush for a BO if it has been rendered to within the
- * same batchbuffer as a read that's about to be emitted.
- *
- * The GPU has separate, incoherent caches for the render cache and the
- * sampler cache, along with other caches. Usually data in the different
- * caches don't interact (e.g. we don't render to our driver-generated
- * immediate constant data), but for render-to-texture in FBOs we definitely
- * do. When a batchbuffer is flushed, the kernel will ensure that everything
- * necessary is flushed before another use of that BO, but for reuse from
- * different caches within a batchbuffer, it's all our responsibility.
- */
-static void
-flush_depth_and_render_caches(struct iris_batch *batch)
-{
-   iris_emit_pipe_control_flush(batch,
-                                PIPE_CONTROL_DEPTH_CACHE_FLUSH |
-                                PIPE_CONTROL_RENDER_TARGET_FLUSH |
-                                PIPE_CONTROL_CS_STALL);
-
-   iris_emit_pipe_control_flush(batch,
-                                PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
-                                PIPE_CONTROL_CONST_CACHE_INVALIDATE);
-
-   iris_cache_sets_clear(batch);
-}
-
-void
-iris_cache_flush_for_read(struct iris_batch *batch,
-                          struct iris_bo *bo)
-{
-   if (_mesa_hash_table_search(batch->cache.render, bo) ||
-       _mesa_set_search(batch->cache.depth, bo))
-      flush_depth_and_render_caches(batch);
-}
-
-static void *
-format_aux_tuple(enum isl_format format, enum isl_aux_usage aux_usage)
-{
-   return (void *)(uintptr_t)((uint32_t)format << 8 | aux_usage);
-}
-
-void
-iris_cache_flush_for_render(struct iris_batch *batch,
-                            struct iris_bo *bo,
-                            enum isl_format format,
-                            enum isl_aux_usage aux_usage)
-{
-   if (_mesa_set_search(batch->cache.depth, bo))
-      flush_depth_and_render_caches(batch);
-
-   /* Check to see if this bo has been used by a previous rendering operation
-    * but with a different format or aux usage. If it has, flush the render
-    * cache so we ensure that it's only in there with one format or aux usage
-    * at a time.
-    *
-    * Even though it's not obvious, this can easily happen in practice.
-    * Suppose a client is blending on a surface with sRGB encode enabled on
-    * gen9. This implies that you get AUX_USAGE_CCS_D at best. If the client
-    * then disables sRGB encode and continues blending we will flip on
-    * AUX_USAGE_CCS_E without doing any sort of resolve in-between (this is
-    * perfectly valid since CCS_D is a subset of CCS_E). However, this means
-    * that we have fragments in-flight which are rendering with UNORM+CCS_E
-    * and other fragments in-flight with SRGB+CCS_D on the same surface at the
-    * same time and the pixel scoreboard and color blender are trying to sort
-    * it all out. This ends badly (i.e. GPU hangs).
-    *
-    * To date, we have never observed GPU hangs or even corruption to be
-    * associated with switching the format, only the aux usage. However,
-    * there are comments in various docs which indicate that the render cache
-    * isn't 100% resilient to format changes. We may as well be conservative
-    * and flush on format changes too. We can always relax this later if we
-    * find it to be a performance problem.
-    */
-   struct hash_entry *entry = _mesa_hash_table_search(batch->cache.render, bo);
-   if (entry && entry->data != format_aux_tuple(format, aux_usage))
-      flush_depth_and_render_caches(batch);
-}
-
-void
-iris_render_cache_add_bo(struct iris_batch *batch,
-                         struct iris_bo *bo,
-                         enum isl_format format,
-                         enum isl_aux_usage aux_usage)
-{
-#ifndef NDEBUG
-   struct hash_entry *entry = _mesa_hash_table_search(batch->cache.render, bo);
-   if (entry) {
-      /* Otherwise, someone didn't do a flush_for_render and that would be
-       * very bad indeed.
-       */
-      assert(entry->data == format_aux_tuple(format, aux_usage));
-   }
-#endif
-
-   _mesa_hash_table_insert(batch->cache.render, bo,
-                           format_aux_tuple(format, aux_usage));
-}
-
-void
-iris_cache_flush_for_depth(struct iris_batch *batch,
-                           struct iris_bo *bo)
-{
-   if (_mesa_hash_table_search(batch->cache.render, bo))
-      flush_depth_and_render_caches(batch);
-}
-
-void
-iris_depth_cache_add_bo(struct iris_batch *batch, struct iris_bo *bo)
-{
-   _mesa_set_add(batch->cache.depth, bo);
-}
-
static void
iris_texture_barrier(struct pipe_context *ctx, unsigned flags)
{
   struct iris_context *ice = (void *) ctx;
+   struct iris_batch *render_batch = &ice->batches[IRIS_BATCH_RENDER];
+   struct iris_batch *compute_batch = &ice->batches[IRIS_BATCH_COMPUTE];
+
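+   /* The render batch's depth and render caches may hold data that later
+    * texturing needs to see.  Flush them (with a CS stall so the flush
+    * completes) and then invalidate the texture cache so stale texels
+    * aren't reused.  Skip the PIPE_CONTROLs entirely if the batch has no
+    * draws and no tracked cache entries.
+    */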
+   if (render_batch->contains_draw ||
+       render_batch->cache.render->entries ||
+       render_batch->cache.depth->entries) {
+      iris_emit_pipe_control_flush(render_batch,
+                                   PIPE_CONTROL_DEPTH_CACHE_FLUSH |
+                                   PIPE_CONTROL_RENDER_TARGET_FLUSH |
+                                   PIPE_CONTROL_CS_STALL);
+      iris_emit_pipe_control_flush(render_batch,
+                                   PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
+   }
-   // XXX: compute batch?
-
-   flush_depth_and_render_caches(&ice->render_batch);
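+   /* The compute batch has no depth or render target caches to flush.
+    * A CS stall lets outstanding compute work finish, and the texture
+    * cache invalidate makes its results visible to later sampling.
+    */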
+   if (compute_batch->contains_draw) {
+      iris_emit_pipe_control_flush(compute_batch,
+                                   PIPE_CONTROL_CS_STALL);
+      iris_emit_pipe_control_flush(compute_batch,
+                                   PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
+   }
}
static void
              PIPE_CONTROL_CONST_CACHE_INVALIDATE;
   }
-   if (flags & PIPE_BARRIER_TEXTURE) {
-      bits |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
-   }
-
-   if (flags & PIPE_BARRIER_FRAMEBUFFER) {
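+   /* Texture and framebuffer barriers need the same treatment: flush the
+    * render target so prior rendering reaches memory, and invalidate the
+    * texture cache so stale texels are discarded.
+    */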
+   if (flags & (PIPE_BARRIER_TEXTURE | PIPE_BARRIER_FRAMEBUFFER)) {
      bits |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
              PIPE_CONTROL_RENDER_TARGET_FLUSH;
   }
-   // XXX: MAPPED_BUFFER, QUERY_BUFFER, STREAMOUT_BUFFER, GLOBAL_BUFFER?
-   // XXX: compute batch?
-
-   iris_emit_pipe_control_flush(&ice->render_batch, bits);
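+   /* Apply the accumulated flush bits to every batch that has pending
+    * work, not just the render batch.  Idle batches are skipped so we
+    * don't emit useless PIPE_CONTROLs into empty batchbuffers.
+    */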
+   for (int i = 0; i < IRIS_BATCH_COUNT; i++) {
+      if (ice->batches[i].contains_draw ||
+          ice->batches[i].cache.render->entries)
+         iris_emit_pipe_control_flush(&ice->batches[i], bits);
+   }
}
void