 * given generation.
 */
void
-iris_emit_pipe_control_flush(struct iris_batch *batch, uint32_t flags)
+iris_emit_pipe_control_flush(struct iris_batch *batch,
+                             const char *reason,
+                             uint32_t flags)
{
   if ((flags & PIPE_CONTROL_CACHE_FLUSH_BITS) &&
       (flags & PIPE_CONTROL_CACHE_INVALIDATE_BITS)) {
       * with any write cache flush, so this shouldn't be a concern.  In order
       * to ensure a full stall, we do an end-of-pipe sync.
       */
-      iris_emit_end_of_pipe_sync(batch, flags & PIPE_CONTROL_CACHE_FLUSH_BITS);
+      iris_emit_end_of_pipe_sync(batch, reason,
+                                 flags & PIPE_CONTROL_CACHE_FLUSH_BITS);
      flags &= ~(PIPE_CONTROL_CACHE_FLUSH_BITS | PIPE_CONTROL_CS_STALL);
   }
-   batch->vtbl->emit_raw_pipe_control(batch, flags, NULL, 0, 0);
+   batch->vtbl->emit_raw_pipe_control(batch, reason, flags, NULL, 0, 0);
}
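
/* Illustrative sketch (not part of this patch): a call site under the new
 * signature.  The reason string and flag combination here are invented for
 * the example:
 *
 *    iris_emit_pipe_control_flush(batch,
 *                                 "example: flush RT before readback",
 *                                 PIPE_CONTROL_RENDER_TARGET_FLUSH |
 *                                 PIPE_CONTROL_CS_STALL);
 *
 * The string is purely informational, presumably so debugging output can
 * identify which caller requested a given PIPE_CONTROL.
 */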
/**
 *  - PIPE_CONTROL_WRITE_DEPTH_COUNT
 */
void
-iris_emit_pipe_control_write(struct iris_batch *batch, uint32_t flags,
+iris_emit_pipe_control_write(struct iris_batch *batch,
+                             const char *reason, uint32_t flags,
                              struct iris_bo *bo, uint32_t offset,
                              uint64_t imm)
{
-   batch->vtbl->emit_raw_pipe_control(batch, flags, bo, offset, imm);
+   batch->vtbl->emit_raw_pipe_control(batch, reason, flags, bo, offset, imm);
}
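
/* Illustrative sketch (not part of this patch): a hypothetical caller
 * recording a pipelined timestamp for a query.  "query_bo" and
 * "snapshot_offset" are placeholder names, not identifiers from this code:
 *
 *    iris_emit_pipe_control_write(batch, "example: query timestamp",
 *                                 PIPE_CONTROL_WRITE_TIMESTAMP,
 *                                 query_bo, snapshot_offset, 0);
 */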
/*
 * Data" in the PIPE_CONTROL command.
 */
void
-iris_emit_end_of_pipe_sync(struct iris_batch *batch, uint32_t flags)
+iris_emit_end_of_pipe_sync(struct iris_batch *batch,
+                           const char *reason, uint32_t flags)
{
   /* From Sandybridge PRM, volume 2, "1.7.3.1 Writing a Value to Memory":
    *
    *      Data, Required Write Cache Flush bits set)
    *    - Workload-2 (Can use the data produce or output by Workload-1)
    */
-   iris_emit_pipe_control_write(batch, flags | PIPE_CONTROL_CS_STALL |
+   iris_emit_pipe_control_write(batch, reason,
+                                flags | PIPE_CONTROL_CS_STALL |
                                PIPE_CONTROL_WRITE_IMMEDIATE,
                                batch->screen->workaround_bo, 0, 0);
}
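
/* Illustrative sketch (not part of this patch): following the PRM's
 * Workload-1/Workload-2 ordering quoted above, a hypothetical caller would
 * sync before dependent work consumes Workload-1's output.  The reason and
 * flush bits are invented for the example:
 *
 *    iris_emit_end_of_pipe_sync(batch, "example: W1 output -> W2",
 *                               PIPE_CONTROL_RENDER_TARGET_FLUSH);
 */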
iris_texture_barrier(struct pipe_context *ctx, unsigned flags)
{
   struct iris_context *ice = (void *) ctx;
+   struct iris_batch *render_batch = &ice->batches[IRIS_BATCH_RENDER];
+   struct iris_batch *compute_batch = &ice->batches[IRIS_BATCH_COMPUTE];
+
+   if (render_batch->contains_draw ||
+       render_batch->cache.render->entries ||
+       render_batch->cache.depth->entries) {
+      iris_batch_maybe_flush(render_batch, 48);
+      iris_emit_pipe_control_flush(render_batch,
+                                   "API: texture barrier (1/2)",
+                                   PIPE_CONTROL_DEPTH_CACHE_FLUSH |
+                                   PIPE_CONTROL_RENDER_TARGET_FLUSH |
+                                   PIPE_CONTROL_CS_STALL);
+      iris_emit_pipe_control_flush(render_batch,
+                                   "API: texture barrier (2/2)",
+                                   PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
+   }
-   // XXX: compute batch?
-
-   iris_flush_depth_and_render_caches(&ice->render_batch);
+   if (compute_batch->contains_draw) {
+      iris_batch_maybe_flush(compute_batch, 48);
+      iris_emit_pipe_control_flush(compute_batch,
+                                   "API: texture barrier (1/2)",
+                                   PIPE_CONTROL_CS_STALL);
+      iris_emit_pipe_control_flush(compute_batch,
+                                   "API: texture barrier (2/2)",
+                                   PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
+   }
}
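
/* For context (not part of this patch): this hook is reached via Gallium's
 * pipe_context::texture_barrier, e.g. when the GL state tracker handles
 * glTextureBarrier().  A sketch of such a caller:
 *
 *    ctx->texture_barrier(ctx, PIPE_TEXTURE_BARRIER_SAMPLER);
 *
 * Note the two-step pattern above (flush + CS stall first, texture cache
 * invalidate second) pre-splits the flush and invalidate, matching the
 * split that iris_emit_pipe_control_flush() would otherwise perform itself.
 */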
static void
              PIPE_CONTROL_CONST_CACHE_INVALIDATE;
   }
-   if (flags & PIPE_BARRIER_TEXTURE) {
-      bits |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
-   }
-
-   if (flags & PIPE_BARRIER_FRAMEBUFFER) {
+   if (flags & (PIPE_BARRIER_TEXTURE | PIPE_BARRIER_FRAMEBUFFER)) {
      bits |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
              PIPE_CONTROL_RENDER_TARGET_FLUSH;
   }
-   // XXX: MAPPED_BUFFER, QUERY_BUFFER, STREAMOUT_BUFFER, GLOBAL_BUFFER?
-
-   // XXX: don't unconditionally emit flushes in both engines, we don't
-   // even know if we're even using e.g. the compute engine...
-
-   if (ice->render_batch.contains_draw)
-      iris_emit_pipe_control_flush(&ice->render_batch, bits);
-   if (ice->compute_batch.contains_draw)
-      iris_emit_pipe_control_flush(&ice->compute_batch, bits);
+   for (int i = 0; i < IRIS_BATCH_COUNT; i++) {
+      if (ice->batches[i].contains_draw ||
+          ice->batches[i].cache.render->entries) {
+         iris_batch_maybe_flush(&ice->batches[i], 24);
+         iris_emit_pipe_control_flush(&ice->batches[i], "API: memory barrier",
+                                      bits);
+      }
+   }
}
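
/* For context (not part of this patch): a hypothetical caller handling
 * glMemoryBarrier(GL_TEXTURE_FETCH_BARRIER_BIT | GL_FRAMEBUFFER_BARRIER_BIT)
 * would reach this path roughly as:
 *
 *    ctx->memory_barrier(ctx, PIPE_BARRIER_TEXTURE |
 *                             PIPE_BARRIER_FRAMEBUFFER);
 */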
void