+
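+/**
+ * The pipe->texture_barrier() driver hook.
+ *
+ * Flushes any pending rendering output (render target and depth caches)
+ * and then invalidates the texture cache, so that subsequent sampling
+ * can observe rendering performed earlier in the frame.
+ */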
+static void
+iris_texture_barrier(struct pipe_context *ctx, unsigned flags)
+{
+   struct iris_context *ice = (void *) ctx;
+   struct iris_batch *render_batch = &ice->batches[IRIS_BATCH_RENDER];
+   struct iris_batch *compute_batch = &ice->batches[IRIS_BATCH_COMPUTE];
+
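+   /* Flush the render target and depth caches (with a CS stall) first,
+    * then invalidate the texture cache in a second PIPE_CONTROL.
+    */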
+   if (render_batch->contains_draw ||
+       render_batch->cache.render->entries ||
+       render_batch->cache.depth->entries) {
+      iris_emit_pipe_control_flush(render_batch,
+                                   PIPE_CONTROL_DEPTH_CACHE_FLUSH |
+                                   PIPE_CONTROL_RENDER_TARGET_FLUSH |
+                                   PIPE_CONTROL_CS_STALL);
+      iris_emit_pipe_control_flush(render_batch,
+                                   PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
+   }
+
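+   /* Compute batches do no rendering, so there are no render target or
+    * depth caches to flush; just stall and invalidate the texture cache.
+    */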
+   if (compute_batch->contains_draw) {
+      iris_emit_pipe_control_flush(compute_batch,
+                                   PIPE_CONTROL_CS_STALL);
+      iris_emit_pipe_control_flush(compute_batch,
+                                   PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
+   }
+}
+
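+/**
+ * The pipe->memory_barrier() driver hook.
+ *
+ * Flushes the data cache for shader writes and invalidates the read
+ * caches selected by the given PIPE_BARRIER flags, on every batch that
+ * has pending work.
+ */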
+static void
+iris_memory_barrier(struct pipe_context *ctx, unsigned flags)
+{
+   struct iris_context *ice = (void *) ctx;
+   unsigned bits = PIPE_CONTROL_DATA_CACHE_FLUSH | PIPE_CONTROL_CS_STALL;
+
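+   /* Buffers read by the vertex fetch stage need a VF cache invalidate. */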
+   if (flags & (PIPE_BARRIER_VERTEX_BUFFER |
+                PIPE_BARRIER_INDEX_BUFFER |
+                PIPE_BARRIER_INDIRECT_BUFFER)) {
+      bits |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
+   }
+
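+   /* Constant data may be read through either the constant cache or the
+    * texture cache, so invalidate both.
+    */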
+   if (flags & PIPE_BARRIER_CONSTANT_BUFFER) {
+      bits |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
+              PIPE_CONTROL_CONST_CACHE_INVALIDATE;
+   }
+
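+   /* Flush the render cache and invalidate the sampler cache so prior
+    * writes become visible to texturing and render target reads.
+    */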
+   if (flags & (PIPE_BARRIER_TEXTURE | PIPE_BARRIER_FRAMEBUFFER)) {
+      bits |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
+              PIPE_CONTROL_RENDER_TARGET_FLUSH;
+   }
+
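+   /* Only emit the flush on batches that actually have pending work. */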
+   for (int i = 0; i < IRIS_BATCH_COUNT; i++) {
+      if (ice->batches[i].contains_draw ||
+          ice->batches[i].cache.render->entries)
+         iris_emit_pipe_control_flush(&ice->batches[i], bits);
+   }
+}
+
+void
+iris_init_flush_functions(struct pipe_context *ctx)
+{
+   ctx->memory_barrier = iris_memory_barrier;
+   ctx->texture_barrier = iris_texture_barrier;
+}