* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
*
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/**
+ * @file iris_pipe_control.c
+ *
+ * PIPE_CONTROL is the main flushing and synchronization primitive on Intel
+ * GPUs. It can invalidate caches, stall until rendering reaches various
+ * stages of completion, write to memory, and other things. In a way, it's
+ * a Swiss army knife command - it has all kinds of capabilities, but some
+ * significant limitations as well.
+ *
+ * Unfortunately, it's notoriously complicated and difficult to use. Many
+ * sub-commands can't be used together. Some are meant to be used at the
+ * top of the pipeline (invalidating caches before drawing), while some are
+ * meant to be used at the end (stalling or flushing after drawing).
+ *
+ * Also, there's a list of restrictions a mile long, which vary by generation.
+ * Do this before doing that, or suffer the consequences (usually a GPU hang).
+ *
+ * This file contains helpers for emitting them safely. You can simply call
+ * iris_emit_pipe_control_flush() with the desired operations (as logical
+ * PIPE_CONTROL_* bits), and it will take care of splitting it into multiple
+ * PIPE_CONTROL commands as necessary. The per-generation workarounds are
+ * applied in iris_emit_raw_pipe_control() in iris_state.c.
*/
#include "iris_context.h"
* given generation.
*/
void
-iris_emit_pipe_control_flush(struct iris_batch *batch, uint32_t flags)
+iris_emit_pipe_control_flush(struct iris_batch *batch,
+ const char *reason,
+ uint32_t flags)
{
if ((flags & PIPE_CONTROL_CACHE_FLUSH_BITS) &&
(flags & PIPE_CONTROL_CACHE_INVALIDATE_BITS)) {
* with any write cache flush, so this shouldn't be a concern. In order
* to ensure a full stall, we do an end-of-pipe sync.
*/
- iris_emit_end_of_pipe_sync(batch, flags & PIPE_CONTROL_CACHE_FLUSH_BITS);
+ iris_emit_end_of_pipe_sync(batch, reason,
+ flags & PIPE_CONTROL_CACHE_FLUSH_BITS);
flags &= ~(PIPE_CONTROL_CACHE_FLUSH_BITS | PIPE_CONTROL_CS_STALL);
}
- batch->vtbl->emit_raw_pipe_control(batch, flags, NULL, 0, 0);
+ batch->screen->vtbl.emit_raw_pipe_control(batch, reason, flags, NULL, 0, 0);
}
/**
* - PIPE_CONTROL_WRITE_DEPTH_COUNT
*/
void
-iris_emit_pipe_control_write(struct iris_batch *batch, uint32_t flags,
+iris_emit_pipe_control_write(struct iris_batch *batch,
+ const char *reason, uint32_t flags,
struct iris_bo *bo, uint32_t offset,
uint64_t imm)
{
- batch->vtbl->emit_raw_pipe_control(batch, flags, bo, offset, imm);
+ batch->screen->vtbl.emit_raw_pipe_control(batch, reason, flags, bo, offset, imm);
}
/*
* Data" in the PIPE_CONTROL command.
*/
void
-iris_emit_end_of_pipe_sync(struct iris_batch *batch, uint32_t flags)
+iris_emit_end_of_pipe_sync(struct iris_batch *batch,
+ const char *reason, uint32_t flags)
{
/* From Sandybridge PRM, volume 2, "1.7.3.1 Writing a Value to Memory":
*
* Data, Required Write Cache Flush bits set)
* - Workload-2 (Can use the data produce or output by Workload-1)
*/
- iris_emit_pipe_control_write(batch, flags | PIPE_CONTROL_CS_STALL |
+ iris_emit_pipe_control_write(batch, reason,
+ flags | PIPE_CONTROL_CS_STALL |
PIPE_CONTROL_WRITE_IMMEDIATE,
- batch->screen->workaround_bo, 0, 0);
-}
-
-void
-iris_cache_sets_clear(struct iris_batch *batch)
-{
- struct hash_entry *render_entry;
- hash_table_foreach(batch->cache.render, render_entry)
- _mesa_hash_table_remove(batch->cache.render, render_entry);
-
- struct set_entry *depth_entry;
- set_foreach(batch->cache.depth, depth_entry)
- _mesa_set_remove(batch->cache.depth, depth_entry);
+ batch->screen->workaround_address.bo,
+ batch->screen->workaround_address.offset, 0);
}
/**
- * Emits an appropriate flush for a BO if it has been rendered to within the
- * same batchbuffer as a read that's about to be emitted.
- *
- * The GPU has separate, incoherent caches for the render cache and the
- * sampler cache, along with other caches. Usually data in the different
- * caches don't interact (e.g. we don't render to our driver-generated
- * immediate constant data), but for render-to-texture in FBOs we definitely
- * do. When a batchbuffer is flushed, the kernel will ensure that everything
- * necessary is flushed before another use of that BO, but for reuse from
- * different caches within a batchbuffer, it's all our responsibility.
+ * Flush and invalidate all caches (for debugging purposes).
*/
-static void
-flush_depth_and_render_caches(struct iris_batch *batch)
+void
+iris_flush_all_caches(struct iris_batch *batch)
{
- iris_emit_pipe_control_flush(batch,
+ iris_emit_pipe_control_flush(batch, "debug: flush all caches",
+ PIPE_CONTROL_CS_STALL |
+ PIPE_CONTROL_DATA_CACHE_FLUSH |
PIPE_CONTROL_DEPTH_CACHE_FLUSH |
PIPE_CONTROL_RENDER_TARGET_FLUSH |
- PIPE_CONTROL_CS_STALL);
-
- iris_emit_pipe_control_flush(batch,
+ PIPE_CONTROL_VF_CACHE_INVALIDATE |
+ PIPE_CONTROL_INSTRUCTION_INVALIDATE |
PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
- PIPE_CONTROL_CONST_CACHE_INVALIDATE);
-
- iris_cache_sets_clear(batch);
-}
-
-void
-iris_cache_flush_for_read(struct iris_batch *batch,
- struct iris_bo *bo)
-{
- if (_mesa_hash_table_search(batch->cache.render, bo) ||
- _mesa_set_search(batch->cache.depth, bo))
- flush_depth_and_render_caches(batch);
-}
-
-static void *
-format_aux_tuple(enum isl_format format, enum isl_aux_usage aux_usage)
-{
- return (void *)(uintptr_t)((uint32_t)format << 8 | aux_usage);
-}
-
-void
-iris_cache_flush_for_render(struct iris_batch *batch,
- struct iris_bo *bo,
- enum isl_format format,
- enum isl_aux_usage aux_usage)
-{
- if (_mesa_set_search(batch->cache.depth, bo))
- flush_depth_and_render_caches(batch);
-
- /* Check to see if this bo has been used by a previous rendering operation
- * but with a different format or aux usage. If it has, flush the render
- * cache so we ensure that it's only in there with one format or aux usage
- * at a time.
- *
- * Even though it's not obvious, this can easily happen in practice.
- * Suppose a client is blending on a surface with sRGB encode enabled on
- * gen9. This implies that you get AUX_USAGE_CCS_D at best. If the client
- * then disables sRGB decode and continues blending we will flip on
- * AUX_USAGE_CCS_E without doing any sort of resolve in-between (this is
- * perfectly valid since CCS_E is a subset of CCS_D). However, this means
- * that we have fragments in-flight which are rendering with UNORM+CCS_E
- * and other fragments in-flight with SRGB+CCS_D on the same surface at the
- * same time and the pixel scoreboard and color blender are trying to sort
- * it all out. This ends badly (i.e. GPU hangs).
- *
- * To date, we have never observed GPU hangs or even corruption to be
- * associated with switching the format, only the aux usage. However,
- * there are comments in various docs which indicate that the render cache
- * isn't 100% resilient to format changes. We may as well be conservative
- * and flush on format changes too. We can always relax this later if we
- * find it to be a performance problem.
- */
- struct hash_entry *entry = _mesa_hash_table_search(batch->cache.render, bo);
- if (entry && entry->data != format_aux_tuple(format, aux_usage))
- flush_depth_and_render_caches(batch);
-}
-
-void
-iris_render_cache_add_bo(struct iris_batch *batch,
- struct iris_bo *bo,
- enum isl_format format,
- enum isl_aux_usage aux_usage)
-{
-#ifndef NDEBUG
- struct hash_entry *entry = _mesa_hash_table_search(batch->cache.render, bo);
- if (entry) {
- /* Otherwise, someone didn't do a flush_for_render and that would be
- * very bad indeed.
- */
- assert(entry->data == format_aux_tuple(format, aux_usage));
- }
-#endif
-
- _mesa_hash_table_insert(batch->cache.render, bo,
- format_aux_tuple(format, aux_usage));
-}
-
-void
-iris_cache_flush_for_depth(struct iris_batch *batch,
- struct iris_bo *bo)
-{
- if (_mesa_hash_table_search(batch->cache.render, bo))
- flush_depth_and_render_caches(batch);
-}
-
-void
-iris_depth_cache_add_bo(struct iris_batch *batch, struct iris_bo *bo)
-{
- _mesa_set_add(batch->cache.depth, bo);
+ PIPE_CONTROL_CONST_CACHE_INVALIDATE |
+ PIPE_CONTROL_STATE_CACHE_INVALIDATE);
}
static void
iris_texture_barrier(struct pipe_context *ctx, unsigned flags)
{
struct iris_context *ice = (void *) ctx;
+ struct iris_batch *render_batch = &ice->batches[IRIS_BATCH_RENDER];
+ struct iris_batch *compute_batch = &ice->batches[IRIS_BATCH_COMPUTE];
+
+ if (render_batch->contains_draw ||
+ render_batch->cache.render->entries ||
+ render_batch->cache.depth->entries) {
+ iris_batch_maybe_flush(render_batch, 48);
+ iris_emit_pipe_control_flush(render_batch,
+ "API: texture barrier (1/2)",
+ PIPE_CONTROL_DEPTH_CACHE_FLUSH |
+ PIPE_CONTROL_RENDER_TARGET_FLUSH |
+ PIPE_CONTROL_CS_STALL);
+ iris_emit_pipe_control_flush(render_batch,
+ "API: texture barrier (2/2)",
+ PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
+ }
- // XXX: compute batch?
-
- flush_depth_and_render_caches(&ice->render_batch);
+ if (compute_batch->contains_draw) {
+ iris_batch_maybe_flush(compute_batch, 48);
+ iris_emit_pipe_control_flush(compute_batch,
+ "API: texture barrier (1/2)",
+ PIPE_CONTROL_CS_STALL);
+ iris_emit_pipe_control_flush(compute_batch,
+ "API: texture barrier (2/2)",
+ PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
+ }
}
static void
PIPE_CONTROL_CONST_CACHE_INVALIDATE;
}
- if (flags & PIPE_BARRIER_TEXTURE) {
- bits |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
- }
-
- if (flags & PIPE_BARRIER_FRAMEBUFFER) {
+ if (flags & (PIPE_BARRIER_TEXTURE | PIPE_BARRIER_FRAMEBUFFER)) {
bits |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
PIPE_CONTROL_RENDER_TARGET_FLUSH;
}
- // XXX: MAPPED_BUFFER, QUERY_BUFFER, STREAMOUT_BUFFER, GLOBAL_BUFFER?
- // XXX: compute batch?
-
- iris_emit_pipe_control_flush(&ice->render_batch, bits);
+ for (int i = 0; i < IRIS_BATCH_COUNT; i++) {
+ if (ice->batches[i].contains_draw ||
+ ice->batches[i].cache.render->entries) {
+ iris_batch_maybe_flush(&ice->batches[i], 24);
+ iris_emit_pipe_control_flush(&ice->batches[i], "API: memory barrier",
+ bits);
+ }
+ }
}
void