radeonsi: move VS_STATE.LS_OUT_PATCH_SIZE a few bits higher to make space there
[mesa.git] / src / gallium / drivers / radeonsi / si_fence.c
index 5f478afaf638d8df8e19b5a94077ea8c693215e5..e9dd3e937f25954500182843172c9992bcbcf3f0 100644 (file)
@@ -1,5 +1,6 @@
 /*
  * Copyright 2013-2017 Advanced Micro Devices, Inc.
+ * All Rights Reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
 #include "util/u_queue.h"
 #include "util/u_upload_mgr.h"
 
-#include "si_pipe.h"
-#include "radeon/r600_cs.h"
+#include "si_build_pm4.h"
 
 struct si_fine_fence {
-       struct r600_resource *buf;
+       struct si_resource *buf;
        unsigned offset;
 };
 
@@ -46,21 +46,149 @@ struct si_multi_fence {
 
        /* If the context wasn't flushed at fence creation, this is non-NULL. */
        struct {
-               struct r600_common_context *ctx;
+               struct si_context *ctx;
                unsigned ib_index;
        } gfx_unflushed;
 
        struct si_fine_fence fine;
 };
 
-static void si_add_fence_dependency(struct r600_common_context *rctx,
+/**
+ * Write an EOP event.
+ *
+ * \param event        EVENT_TYPE_*
+ * \param event_flags  Optional cache flush flags (TC)
+ * \param dst_sel      MEM or TC_L2
+ * \param int_sel      NONE or SEND_DATA_AFTER_WR_CONFIRM
+ * \param data_sel     DISCARD, VALUE_32BIT, TIMESTAMP, or GDS
+ * \param buf          Buffer to add to the buffer list, or NULL
+ * \param va           GPU address to write the fence to
+ * \param new_fence    Fence value to write for this event
+ * \param query_type   PIPE_QUERY_* (only used to decide whether the GFX9
+ *                     ZPASS_DONE workaround is needed)
+ */
+void si_cp_release_mem(struct si_context *ctx, struct radeon_cmdbuf *cs,
+                      unsigned event, unsigned event_flags,
+                      unsigned dst_sel, unsigned int_sel, unsigned data_sel,
+                      struct si_resource *buf, uint64_t va,
+                      uint32_t new_fence, unsigned query_type)
+{
+       unsigned op = EVENT_TYPE(event) |
+                     EVENT_INDEX(event == V_028A90_CS_DONE ||
+                                 event == V_028A90_PS_DONE ? 6 : 5) |
+                     event_flags;
+       unsigned sel = EOP_DST_SEL(dst_sel) |
+                      EOP_INT_SEL(int_sel) |
+                      EOP_DATA_SEL(data_sel);
+       bool compute_ib = !ctx->has_graphics ||
+                         cs == ctx->prim_discard_compute_cs;
+
+       if (ctx->chip_class >= GFX9 ||
+           (compute_ib && ctx->chip_class >= GFX7)) {
+               /* A ZPASS_DONE or PIXEL_STAT_DUMP_EVENT (of the DB occlusion
+                * counters) must immediately precede every timestamp event to
+                * prevent a GPU hang on GFX9.
+                *
+                * Occlusion queries don't need to do it here, because they
+                * always do ZPASS_DONE before the timestamp.
+                */
+               if (ctx->chip_class == GFX9 && !compute_ib &&
+                   query_type != PIPE_QUERY_OCCLUSION_COUNTER &&
+                   query_type != PIPE_QUERY_OCCLUSION_PREDICATE &&
+                   query_type != PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE) {
+                       struct si_resource *scratch = ctx->eop_bug_scratch;
+
+                       assert(16 * ctx->screen->info.num_render_backends <=
+                              scratch->b.b.width0);
+                       radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
+                       radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1));
+                       radeon_emit(cs, scratch->gpu_address);
+                       radeon_emit(cs, scratch->gpu_address >> 32);
+
+                       radeon_add_to_buffer_list(ctx, ctx->gfx_cs, scratch,
+                                                 RADEON_USAGE_WRITE, RADEON_PRIO_QUERY);
+               }
+
+               radeon_emit(cs, PKT3(PKT3_RELEASE_MEM, ctx->chip_class >= GFX9 ? 6 : 5, 0));
+               radeon_emit(cs, op);
+               radeon_emit(cs, sel);
+               radeon_emit(cs, va);            /* address lo */
+               radeon_emit(cs, va >> 32);      /* address hi */
+               radeon_emit(cs, new_fence);     /* immediate data lo */
+               radeon_emit(cs, 0); /* immediate data hi */
+               if (ctx->chip_class >= GFX9)
+                       radeon_emit(cs, 0); /* unused */
+       } else {
+               if (ctx->chip_class == GFX7 ||
+                   ctx->chip_class == GFX8) {
+                       struct si_resource *scratch = ctx->eop_bug_scratch;
+                       uint64_t va = scratch->gpu_address;
+
+                       /* Two EOP events are required to make all engines go idle
+                        * (and to execute the optional cache flushes) before the
+                        * timestamp is written.
+                        */
+                       radeon_emit(cs, PKT3(PKT3_EVENT_WRITE_EOP, 4, 0));
+                       radeon_emit(cs, op);
+                       radeon_emit(cs, va);
+                       radeon_emit(cs, ((va >> 32) & 0xffff) | sel);
+                       radeon_emit(cs, 0); /* immediate data */
+                       radeon_emit(cs, 0); /* unused */
+
+                       radeon_add_to_buffer_list(ctx, ctx->gfx_cs, scratch,
+                                                 RADEON_USAGE_WRITE, RADEON_PRIO_QUERY);
+               }
+
+               radeon_emit(cs, PKT3(PKT3_EVENT_WRITE_EOP, 4, 0));
+               radeon_emit(cs, op);
+               radeon_emit(cs, va);
+               radeon_emit(cs, ((va >> 32) & 0xffff) | sel);
+               radeon_emit(cs, new_fence); /* immediate data */
+               radeon_emit(cs, 0); /* unused */
+       }
+
+       if (buf) {
+               radeon_add_to_buffer_list(ctx, ctx->gfx_cs, buf, RADEON_USAGE_WRITE,
+                                         RADEON_PRIO_QUERY);
+       }
+}
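
For reference, a typical caller mirrors the bottom-of-pipe path in
si_fine_fence_set further down. A minimal sketch, assuming fence_buf and
fence_va (hypothetical names) describe a 4-byte GPU-visible fence slot:

        si_cp_release_mem(sctx, sctx->gfx_cs,
                          V_028A90_BOTTOM_OF_PIPE_TS, 0,
                          EOP_DST_SEL_MEM, EOP_INT_SEL_NONE,
                          EOP_DATA_SEL_VALUE_32BIT,
                          fence_buf, fence_va, 0x80000000,
                          PIPE_QUERY_GPU_FINISHED);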
+
+unsigned si_cp_write_fence_dwords(struct si_screen *screen)
+{
+       unsigned dwords = 6;
+
+       if (screen->info.chip_class == GFX7 ||
+           screen->info.chip_class == GFX8)
+               dwords *= 2;
+
+       return dwords;
+}
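
The doubled count on GFX7/GFX8 covers the second EVENT_WRITE_EOP packet
emitted by the dual-EOP workaround above. A hedged sketch of reserving that
space before emitting a fence (the two-argument cs_check_space signature is
an assumption of this sketch):

        if (!sctx->ws->cs_check_space(sctx->gfx_cs,
                                      si_cp_write_fence_dwords(sctx->screen)))
                si_flush_gfx_cs(sctx, PIPE_FLUSH_ASYNC, NULL);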
+
+void si_cp_wait_mem(struct si_context *ctx, struct radeon_cmdbuf *cs,
+                   uint64_t va, uint32_t ref, uint32_t mask, unsigned flags)
+{
+       radeon_emit(cs, PKT3(PKT3_WAIT_REG_MEM, 5, 0));
+       radeon_emit(cs, WAIT_REG_MEM_MEM_SPACE(1) | flags); /* mem space = memory */
+       radeon_emit(cs, va);
+       radeon_emit(cs, va >> 32);
+       radeon_emit(cs, ref); /* reference value */
+       radeon_emit(cs, mask); /* mask */
+       radeon_emit(cs, 4); /* poll interval */
+}
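
Paired with si_cp_release_mem above, this makes the CP stall until a fence
value lands in memory. A sketch, assuming WAIT_REG_MEM_EQUAL is the compare
function for this packet and fence_va is the address written above:

        si_cp_wait_mem(sctx, sctx->gfx_cs, fence_va, 0x80000000, 0xffffffff,
                       WAIT_REG_MEM_EQUAL);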
+
+static void si_add_fence_dependency(struct si_context *sctx,
                                    struct pipe_fence_handle *fence)
 {
-       struct radeon_winsys *ws = rctx->ws;
+       struct radeon_winsys *ws = sctx->ws;
+
+       if (sctx->sdma_cs)
+               ws->cs_add_fence_dependency(sctx->sdma_cs, fence, 0);
+       ws->cs_add_fence_dependency(sctx->gfx_cs, fence, 0);
+}
 
-       if (rctx->dma.cs)
-               ws->cs_add_fence_dependency(rctx->dma.cs, fence);
-       ws->cs_add_fence_dependency(rctx->gfx.cs, fence);
+static void si_add_syncobj_signal(struct si_context *sctx,
+                                 struct pipe_fence_handle *fence)
+{
+       sctx->ws->cs_add_syncobj_signal(sctx->gfx_cs, fence);
 }
 
 static void si_fence_reference(struct pipe_screen *screen,
@@ -68,17 +196,17 @@ static void si_fence_reference(struct pipe_screen *screen,
                               struct pipe_fence_handle *src)
 {
        struct radeon_winsys *ws = ((struct si_screen*)screen)->ws;
-       struct si_multi_fence **rdst = (struct si_multi_fence **)dst;
-       struct si_multi_fence *rsrc = (struct si_multi_fence *)src;
-
-       if (pipe_reference(&(*rdst)->reference, &rsrc->reference)) {
-               ws->fence_reference(&(*rdst)->gfx, NULL);
-               ws->fence_reference(&(*rdst)->sdma, NULL);
-               tc_unflushed_batch_token_reference(&(*rdst)->tc_token, NULL);
-               r600_resource_reference(&(*rdst)->fine.buf, NULL);
-               FREE(*rdst);
+       struct si_multi_fence **sdst = (struct si_multi_fence **)dst;
+       struct si_multi_fence *ssrc = (struct si_multi_fence *)src;
+
+       if (pipe_reference(&(*sdst)->reference, &ssrc->reference)) {
+               ws->fence_reference(&(*sdst)->gfx, NULL);
+               ws->fence_reference(&(*sdst)->sdma, NULL);
+               tc_unflushed_batch_token_reference(&(*sdst)->tc_token, NULL);
+               si_resource_reference(&(*sdst)->fine.buf, NULL);
+               FREE(*sdst);
        }
-        *rdst = rsrc;
+        *sdst = ssrc;
 }
 
 static struct si_multi_fence *si_create_multi_fence()
@@ -106,30 +234,6 @@ struct pipe_fence_handle *si_create_fence(struct pipe_context *ctx,
        return (struct pipe_fence_handle *)fence;
 }
 
-static void si_fence_server_sync(struct pipe_context *ctx,
-                                struct pipe_fence_handle *fence)
-{
-       struct r600_common_context *rctx = (struct r600_common_context *)ctx;
-       struct si_multi_fence *rfence = (struct si_multi_fence *)fence;
-
-       util_queue_fence_wait(&rfence->ready);
-
-       /* Unflushed fences from the same context are no-ops. */
-       if (rfence->gfx_unflushed.ctx &&
-           rfence->gfx_unflushed.ctx == rctx)
-               return;
-
-       /* All unflushed commands will not start execution before
-        * this fence dependency is signalled.
-        *
-        * Should we flush the context to allow more GPU parallelism?
-        */
-       if (rfence->sdma)
-               si_add_fence_dependency(rctx, rfence->sdma);
-       if (rfence->gfx)
-               si_add_fence_dependency(rctx, rfence->gfx);
-}
-
 static bool si_fine_fence_signaled(struct radeon_winsys *rws,
                                   const struct si_fine_fence *fine)
 {
@@ -150,48 +254,50 @@ static void si_fine_fence_set(struct si_context *ctx,
 
        assert(util_bitcount(flags & (PIPE_FLUSH_TOP_OF_PIPE | PIPE_FLUSH_BOTTOM_OF_PIPE)) == 1);
 
-       /* Use uncached system memory for the fence. */
-       u_upload_alloc(ctx->b.b.stream_uploader, 0, 4, 4,
+       /* Use cached system memory for the fence. */
+       u_upload_alloc(ctx->cached_gtt_allocator, 0, 4, 4,
                       &fine->offset, (struct pipe_resource **)&fine->buf, (void **)&fence_ptr);
        if (!fine->buf)
                return;
 
        *fence_ptr = 0;
 
-       uint64_t fence_va = fine->buf->gpu_address + fine->offset;
-
-       radeon_add_to_buffer_list(&ctx->b, &ctx->b.gfx, fine->buf,
-                                 RADEON_USAGE_WRITE, RADEON_PRIO_QUERY);
        if (flags & PIPE_FLUSH_TOP_OF_PIPE) {
-               struct radeon_winsys_cs *cs = ctx->b.gfx.cs;
-               radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 3, 0));
-               radeon_emit(cs, S_370_DST_SEL(V_370_MEM_ASYNC) |
-                       S_370_WR_CONFIRM(1) |
-                       S_370_ENGINE_SEL(V_370_PFP));
-               radeon_emit(cs, fence_va);
-               radeon_emit(cs, fence_va >> 32);
-               radeon_emit(cs, 0x80000000);
+               uint32_t value = 0x80000000;
+
+               si_cp_write_data(ctx, fine->buf, fine->offset, 4,
+                                V_370_MEM, V_370_PFP, &value);
        } else if (flags & PIPE_FLUSH_BOTTOM_OF_PIPE) {
-               si_gfx_write_event_eop(&ctx->b, V_028A90_BOTTOM_OF_PIPE_TS, 0,
-                                      EOP_DATA_SEL_VALUE_32BIT,
-                                      NULL, fence_va, 0x80000000,
-                                      PIPE_QUERY_GPU_FINISHED);
+               uint64_t fence_va = fine->buf->gpu_address + fine->offset;
+
+               radeon_add_to_buffer_list(ctx, ctx->gfx_cs, fine->buf,
+                                         RADEON_USAGE_WRITE, RADEON_PRIO_QUERY);
+               si_cp_release_mem(ctx, ctx->gfx_cs,
+                                 V_028A90_BOTTOM_OF_PIPE_TS, 0,
+                                 EOP_DST_SEL_MEM, EOP_INT_SEL_NONE,
+                                 EOP_DATA_SEL_VALUE_32BIT,
+                                 NULL, fence_va, 0x80000000,
+                                 PIPE_QUERY_GPU_FINISHED);
        } else {
                assert(false);
        }
 }
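
The value written here is what si_fine_fence_signaled (whose body this diff
elides) later polls. A hedged reconstruction of that check, assuming
buffer_map accepts a NULL cs for an unsynchronized read:

        char *map = rws->buffer_map(fine->buf->buf, NULL,
                                    PIPE_TRANSFER_READ |
                                    PIPE_TRANSFER_UNSYNCHRONIZED);
        if (!map)
                return false;
        return *(uint32_t *)(map + fine->offset) != 0;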
 
-static boolean si_fence_finish(struct pipe_screen *screen,
-                              struct pipe_context *ctx,
-                              struct pipe_fence_handle *fence,
-                              uint64_t timeout)
+static bool si_fence_finish(struct pipe_screen *screen,
+                           struct pipe_context *ctx,
+                           struct pipe_fence_handle *fence,
+                           uint64_t timeout)
 {
        struct radeon_winsys *rws = ((struct si_screen*)screen)->ws;
-       struct si_multi_fence *rfence = (struct si_multi_fence *)fence;
+       struct si_multi_fence *sfence = (struct si_multi_fence *)fence;
+       struct si_context *sctx;
        int64_t abs_timeout = os_time_get_absolute_timeout(timeout);
 
-       if (!util_queue_fence_is_signalled(&rfence->ready)) {
-               if (rfence->tc_token) {
+       ctx = threaded_context_unwrap_sync(ctx);
+       sctx = (struct si_context*)ctx;
+
+       if (!util_queue_fence_is_signalled(&sfence->ready)) {
+               if (sfence->tc_token) {
                        /* Ensure that si_flush_from_st will be called for
                         * this fence, but only if we're in the API thread
                         * where the context is current.
@@ -200,7 +306,7 @@ static boolean si_fence_finish(struct pipe_screen *screen,
                         * be in flight in the driver thread, so the fence
                         * may not be ready yet when this call returns.
                         */
-                       threaded_context_flush(ctx, rfence->tc_token,
+                       threaded_context_flush(ctx, sfence->tc_token,
                                               timeout == 0);
                }
 
@@ -208,9 +314,9 @@ static boolean si_fence_finish(struct pipe_screen *screen,
                        return false;
 
                if (timeout == PIPE_TIMEOUT_INFINITE) {
-                       util_queue_fence_wait(&rfence->ready);
+                       util_queue_fence_wait(&sfence->ready);
                } else {
-                       if (!util_queue_fence_wait_timeout(&rfence->ready, abs_timeout))
+                       if (!util_queue_fence_wait_timeout(&sfence->ready, abs_timeout))
                                return false;
                }
 
@@ -220,8 +326,8 @@ static boolean si_fence_finish(struct pipe_screen *screen,
                }
        }
 
-       if (rfence->sdma) {
-               if (!rws->fence_wait(rws, rfence->sdma, timeout))
+       if (sfence->sdma) {
+               if (!rws->fence_wait(rws, sfence->sdma, timeout))
                        return false;
 
                /* Recompute the timeout after waiting. */
@@ -231,95 +337,109 @@ static boolean si_fence_finish(struct pipe_screen *screen,
                }
        }
 
-       if (!rfence->gfx)
+       if (!sfence->gfx)
                return true;
 
-       if (rfence->fine.buf &&
-           si_fine_fence_signaled(rws, &rfence->fine)) {
-               rws->fence_reference(&rfence->gfx, NULL);
-               r600_resource_reference(&rfence->fine.buf, NULL);
+       if (sfence->fine.buf &&
+           si_fine_fence_signaled(rws, &sfence->fine)) {
+               rws->fence_reference(&sfence->gfx, NULL);
+               si_resource_reference(&sfence->fine.buf, NULL);
                return true;
        }
 
        /* Flush the gfx IB if it hasn't been flushed yet. */
-       if (ctx && rfence->gfx_unflushed.ctx) {
-               struct si_context *sctx;
-
-               sctx = (struct si_context *)threaded_context_unwrap_unsync(ctx);
-               if (rfence->gfx_unflushed.ctx == &sctx->b &&
-                   rfence->gfx_unflushed.ib_index == sctx->b.num_gfx_cs_flushes) {
-                       /* Section 4.1.2 (Signaling) of the OpenGL 4.6 (Core profile)
-                        * spec says:
-                        *
-                        *    "If the sync object being blocked upon will not be
-                        *     signaled in finite time (for example, by an associated
-                        *     fence command issued previously, but not yet flushed to
-                        *     the graphics pipeline), then ClientWaitSync may hang
-                        *     forever. To help prevent this behavior, if
-                        *     ClientWaitSync is called and all of the following are
-                        *     true:
-                        *
-                        *     * the SYNC_FLUSH_COMMANDS_BIT bit is set in flags,
-                        *     * sync is unsignaled when ClientWaitSync is called,
-                        *     * and the calls to ClientWaitSync and FenceSync were
-                        *       issued from the same context,
-                        *
-                        *     then the GL will behave as if the equivalent of Flush
-                        *     were inserted immediately after the creation of sync."
-                        *
-                        * This means we need to flush for such fences even when we're
-                        * not going to wait.
-                        */
-                       threaded_context_unwrap_sync(ctx);
-                       sctx->b.gfx.flush(&sctx->b, timeout ? 0 : RADEON_FLUSH_ASYNC, NULL);
-                       rfence->gfx_unflushed.ctx = NULL;
+       if (sctx && sfence->gfx_unflushed.ctx == sctx &&
+           sfence->gfx_unflushed.ib_index == sctx->num_gfx_cs_flushes) {
+               /* Section 4.1.2 (Signaling) of the OpenGL 4.6 (Core profile)
+                * spec says:
+                *
+                *    "If the sync object being blocked upon will not be
+                *     signaled in finite time (for example, by an associated
+                *     fence command issued previously, but not yet flushed to
+                *     the graphics pipeline), then ClientWaitSync may hang
+                *     forever. To help prevent this behavior, if
+                *     ClientWaitSync is called and all of the following are
+                *     true:
+                *
+                *     * the SYNC_FLUSH_COMMANDS_BIT bit is set in flags,
+                *     * sync is unsignaled when ClientWaitSync is called,
+                *     * and the calls to ClientWaitSync and FenceSync were
+                *       issued from the same context,
+                *
+                *     then the GL will behave as if the equivalent of Flush
+                *     were inserted immediately after the creation of sync."
+                *
+                * This means we need to flush for such fences even when we're
+                * not going to wait.
+                */
+               si_flush_gfx_cs(sctx,
+                               (timeout ? 0 : PIPE_FLUSH_ASYNC) |
+                                RADEON_FLUSH_START_NEXT_GFX_IB_NOW,
+                               NULL);
+               sfence->gfx_unflushed.ctx = NULL;
 
-                       if (!timeout)
-                               return false;
+               if (!timeout)
+                       return false;
 
-                       /* Recompute the timeout after all that. */
-                       if (timeout && timeout != PIPE_TIMEOUT_INFINITE) {
-                               int64_t time = os_time_get_nano();
-                               timeout = abs_timeout > time ? abs_timeout - time : 0;
-                       }
+               /* Recompute the timeout after all that. */
+               if (timeout && timeout != PIPE_TIMEOUT_INFINITE) {
+                       int64_t time = os_time_get_nano();
+                       timeout = abs_timeout > time ? abs_timeout - time : 0;
                }
        }
 
-       if (rws->fence_wait(rws, rfence->gfx, timeout))
+       if (rws->fence_wait(rws, sfence->gfx, timeout))
                return true;
 
        /* Re-check in case the GPU is slow or hangs, but the commands before
         * the fine-grained fence have completed. */
-       if (rfence->fine.buf &&
-           si_fine_fence_signaled(rws, &rfence->fine))
+       if (sfence->fine.buf &&
+           si_fine_fence_signaled(rws, &sfence->fine))
                return true;
 
        return false;
 }
 
 static void si_create_fence_fd(struct pipe_context *ctx,
-                              struct pipe_fence_handle **pfence, int fd)
+                              struct pipe_fence_handle **pfence, int fd,
+                              enum pipe_fd_type type)
 {
        struct si_screen *sscreen = (struct si_screen*)ctx->screen;
        struct radeon_winsys *ws = sscreen->ws;
-       struct si_multi_fence *rfence;
+       struct si_multi_fence *sfence;
 
        *pfence = NULL;
 
-       if (!sscreen->info.has_sync_file)
+       sfence = si_create_multi_fence();
+       if (!sfence)
                return;
 
-       rfence = si_create_multi_fence();
-       if (!rfence)
-               return;
+       switch (type) {
+       case PIPE_FD_TYPE_NATIVE_SYNC:
+               if (!sscreen->info.has_fence_to_handle)
+                       goto finish;
+
+               sfence->gfx = ws->fence_import_sync_file(ws, fd);
+               break;
 
-       rfence->gfx = ws->fence_import_sync_file(ws, fd);
-       if (!rfence->gfx) {
-               FREE(rfence);
+       case PIPE_FD_TYPE_SYNCOBJ:
+               if (!sscreen->info.has_syncobj)
+                       goto finish;
+
+               sfence->gfx = ws->fence_import_syncobj(ws, fd);
+               break;
+
+       default:
+               unreachable("bad fence fd type when importing");
+       }
+
+finish:
+       if (!sfence->gfx) {
+               FREE(sfence);
                return;
        }
 
-       *pfence = (struct pipe_fence_handle*)rfence;
+       *pfence = (struct pipe_fence_handle*)sfence;
 }
 
 static int si_fence_get_fd(struct pipe_screen *screen,
@@ -327,26 +447,26 @@ static int si_fence_get_fd(struct pipe_screen *screen,
 {
        struct si_screen *sscreen = (struct si_screen*)screen;
        struct radeon_winsys *ws = sscreen->ws;
-       struct si_multi_fence *rfence = (struct si_multi_fence *)fence;
+       struct si_multi_fence *sfence = (struct si_multi_fence *)fence;
        int gfx_fd = -1, sdma_fd = -1;
 
-       if (!sscreen->info.has_sync_file)
+       if (!sscreen->info.has_fence_to_handle)
                return -1;
 
-       util_queue_fence_wait(&rfence->ready);
+       util_queue_fence_wait(&sfence->ready);
 
        /* Deferred fences aren't supported. */
-       assert(!rfence->gfx_unflushed.ctx);
-       if (rfence->gfx_unflushed.ctx)
+       assert(!sfence->gfx_unflushed.ctx);
+       if (sfence->gfx_unflushed.ctx)
                return -1;
 
-       if (rfence->sdma) {
-               sdma_fd = ws->fence_export_sync_file(ws, rfence->sdma);
+       if (sfence->sdma) {
+               sdma_fd = ws->fence_export_sync_file(ws, sfence->sdma);
                if (sdma_fd == -1)
                        return -1;
        }
-       if (rfence->gfx) {
-               gfx_fd = ws->fence_export_sync_file(ws, rfence->gfx);
+       if (sfence->gfx) {
+               gfx_fd = ws->fence_export_sync_file(ws, sfence->gfx);
                if (gfx_fd == -1) {
                        if (sdma_fd != -1)
                                close(sdma_fd);
@@ -356,6 +476,8 @@ static int si_fence_get_fd(struct pipe_screen *screen,
 
        /* If we don't have FDs at this point, it means we don't have fences
         * either. */
+       if (sdma_fd == -1 && gfx_fd == -1)
+               return ws->export_signalled_sync_file(ws);
        if (sdma_fd == -1)
                return gfx_fd;
        if (gfx_fd == -1)
@@ -372,33 +494,33 @@ static void si_flush_from_st(struct pipe_context *ctx,
                             unsigned flags)
 {
        struct pipe_screen *screen = ctx->screen;
-       struct r600_common_context *rctx = (struct r600_common_context *)ctx;
-       struct radeon_winsys *ws = rctx->ws;
+       struct si_context *sctx = (struct si_context *)ctx;
+       struct radeon_winsys *ws = sctx->ws;
        struct pipe_fence_handle *gfx_fence = NULL;
        struct pipe_fence_handle *sdma_fence = NULL;
        bool deferred_fence = false;
        struct si_fine_fence fine = {};
-       unsigned rflags = RADEON_FLUSH_ASYNC;
+       unsigned rflags = PIPE_FLUSH_ASYNC;
 
        if (flags & PIPE_FLUSH_END_OF_FRAME)
-               rflags |= RADEON_FLUSH_END_OF_FRAME;
+               rflags |= PIPE_FLUSH_END_OF_FRAME;
 
        if (flags & (PIPE_FLUSH_TOP_OF_PIPE | PIPE_FLUSH_BOTTOM_OF_PIPE)) {
                assert(flags & PIPE_FLUSH_DEFERRED);
                assert(fence);
 
-               si_fine_fence_set((struct si_context *)rctx, &fine, flags);
+               si_fine_fence_set(sctx, &fine, flags);
        }
 
 /* DMA IBs are preambles to gfx IBs and must therefore be flushed first. */
-       if (rctx->dma.cs)
-               rctx->dma.flush(rctx, rflags, fence ? &sdma_fence : NULL);
+       if (sctx->sdma_cs)
+               si_flush_dma_cs(sctx, rflags, fence ? &sdma_fence : NULL);
 
-       if (!radeon_emitted(rctx->gfx.cs, rctx->initial_gfx_cs_size)) {
+       if (!radeon_emitted(sctx->gfx_cs, sctx->initial_gfx_cs_size)) {
                if (fence)
-                       ws->fence_reference(&gfx_fence, rctx->last_gfx_fence);
+                       ws->fence_reference(&gfx_fence, sctx->last_gfx_fence);
                if (!(flags & PIPE_FLUSH_DEFERRED))
-                       ws->cs_sync_flush(rctx->gfx.cs);
+                       ws->cs_sync_flush(sctx->gfx_cs);
        } else {
                /* Instead of flushing, create a deferred fence. Constraints:
                 * - The state tracker must allow a deferred flush.
@@ -409,10 +531,10 @@ static void si_flush_from_st(struct pipe_context *ctx,
                if (flags & PIPE_FLUSH_DEFERRED &&
                    !(flags & PIPE_FLUSH_FENCE_FD) &&
                    fence) {
-                       gfx_fence = rctx->ws->cs_get_next_fence(rctx->gfx.cs);
+                       gfx_fence = sctx->ws->cs_get_next_fence(sctx->gfx_cs);
                        deferred_fence = true;
                } else {
-                       rctx->gfx.flush(rctx, rflags, fence ? &gfx_fence : NULL);
+                       si_flush_gfx_cs(sctx, rflags, fence ? &gfx_fence : NULL);
                }
        }
 
@@ -440,8 +562,8 @@ static void si_flush_from_st(struct pipe_context *ctx,
                multi_fence->sdma = sdma_fence;
 
                if (deferred_fence) {
-                       multi_fence->gfx_unflushed.ctx = rctx;
-                       multi_fence->gfx_unflushed.ib_index = rctx->num_gfx_cs_flushes;
+                       multi_fence->gfx_unflushed.ctx = sctx;
+                       multi_fence->gfx_unflushed.ib_index = sctx->num_gfx_cs_flushes;
                }
 
                multi_fence->fine = fine;
@@ -454,18 +576,72 @@ static void si_flush_from_st(struct pipe_context *ctx,
        }
        assert(!fine.buf);
 finish:
-       if (!(flags & PIPE_FLUSH_DEFERRED)) {
-               if (rctx->dma.cs)
-                       ws->cs_sync_flush(rctx->dma.cs);
-               ws->cs_sync_flush(rctx->gfx.cs);
+       if (!(flags & (PIPE_FLUSH_DEFERRED | PIPE_FLUSH_ASYNC))) {
+               if (sctx->sdma_cs)
+                       ws->cs_sync_flush(sctx->sdma_cs);
+               ws->cs_sync_flush(sctx->gfx_cs);
        }
 }
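
The deferred path above means si_fence_finish may end up flushing on the
waiter's behalf. A sketch of a frontend pattern that exercises it
(hypothetical flow, not from this patch):

        struct pipe_fence_handle *fence = NULL;

        /* Create a deferred fence without submitting the gfx IB yet. */
        ctx->flush(ctx, &fence, PIPE_FLUSH_DEFERRED);

        /* Waiting later forces the flush, per the GL spec quote above. */
        screen->fence_finish(screen, NULL, fence, PIPE_TIMEOUT_INFINITE);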
 
+static void si_fence_server_signal(struct pipe_context *ctx,
+                                  struct pipe_fence_handle *fence)
+{
+       struct si_context *sctx = (struct si_context *)ctx;
+       struct si_multi_fence *sfence = (struct si_multi_fence *)fence;
+
+       /* We should have at least one syncobj to signal */
+       assert(sfence->sdma || sfence->gfx);
+
+       if (sfence->sdma)
+               si_add_syncobj_signal(sctx, sfence->sdma);
+       if (sfence->gfx)
+               si_add_syncobj_signal(sctx, sfence->gfx);
+
+       /* The spec does not require a flush here. We insert one anyway,
+        * because syncobj-based signals are not placed directly into the
+        * command stream; the signal happens only when the submission
+        * associated with the syncobj finishes execution.
+        *
+        * Therefore, we flush the pipe so that no new work is emitted and
+        * executed before the signal operation.
+        */
+       si_flush_from_st(ctx, NULL, PIPE_FLUSH_ASYNC);
+}
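
A sketch of the intended use: import a shared syncobj and signal it from
this context (syncobj_fd is a hypothetical fd received from another
process):

        struct pipe_fence_handle *fence = NULL;

        ctx->create_fence_fd(ctx, &fence, syncobj_fd, PIPE_FD_TYPE_SYNCOBJ);
        if (fence)
                ctx->fence_server_signal(ctx, fence);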
+
+static void si_fence_server_sync(struct pipe_context *ctx,
+                                struct pipe_fence_handle *fence)
+{
+       struct si_context *sctx = (struct si_context *)ctx;
+       struct si_multi_fence *sfence = (struct si_multi_fence *)fence;
+
+       util_queue_fence_wait(&sfence->ready);
+
+       /* Unflushed fences from the same context are no-ops. */
+       if (sfence->gfx_unflushed.ctx &&
+           sfence->gfx_unflushed.ctx == sctx)
+               return;
+
+       /* No unflushed commands will start execution before this fence
+        * dependency is signalled.
+        *
+        * Therefore, we must flush before inserting the dependency.
+        */
+       si_flush_from_st(ctx, NULL, PIPE_FLUSH_ASYNC);
+
+       if (sfence->sdma)
+               si_add_fence_dependency(sctx, sfence->sdma);
+       if (sfence->gfx)
+               si_add_fence_dependency(sctx, sfence->gfx);
+}
+
 void si_init_fence_functions(struct si_context *ctx)
 {
-       ctx->b.b.flush = si_flush_from_st;
-       ctx->b.b.create_fence_fd = si_create_fence_fd;
-       ctx->b.b.fence_server_sync = si_fence_server_sync;
+       ctx->b.flush = si_flush_from_st;
+       ctx->b.create_fence_fd = si_create_fence_fd;
+       ctx->b.fence_server_sync = si_fence_server_sync;
+       ctx->b.fence_server_signal = si_fence_server_signal;
 }
 
 void si_init_screen_fence_functions(struct si_screen *screen)