radeonsi: use r600_common_context less pt1
author Marek Olšák <marek.olsak@amd.com>
Sun, 1 Apr 2018 19:37:11 +0000 (15:37 -0400)
committer Marek Olšák <marek.olsak@amd.com>
Thu, 5 Apr 2018 19:34:58 +0000 (15:34 -0400)
Acked-by: Timothy Arceri <tarceri@itsqueeze.com>
src/gallium/drivers/radeon/r600_buffer_common.c
src/gallium/drivers/radeon/r600_pipe_common.c
src/gallium/drivers/radeonsi/cik_sdma.c
src/gallium/drivers/radeonsi/si_debug.c
src/gallium/drivers/radeonsi/si_dma.c
src/gallium/drivers/radeonsi/si_dma_cs.c
src/gallium/drivers/radeonsi/si_fence.c
src/gallium/drivers/radeonsi/si_gfx_cs.c
src/gallium/drivers/radeonsi/si_pipe.h
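
Background for the hunks below: struct si_context embeds struct
r600_common_context as its first member ("b"), which in turn embeds
struct pipe_context (also "b"). That is why a pipe_context pointer can
be cast directly to si_context, and why fields formerly reached as
ctx->gfx_cs now read ctx->b.gfx_cs (or sctx->b.b.screen for the
pipe_screen). A minimal sketch of the layout -- the field subset is
illustrative and the helper name is not from the patch; the real
definitions live in r600_pipe_common.h and si_pipe.h:

    struct r600_common_context {
            struct pipe_context b;        /* base class, first member */
            struct radeon_winsys *ws;
            struct radeon_winsys_cs *gfx_cs;
            struct radeon_winsys_cs *dma_cs;
            /* ...many more fields... */
    };

    struct si_context {
            struct r600_common_context b; /* base class, first member */
            struct si_screen *screen;
            /* ...many more fields... */
    };

    /* Because the base struct is the first member, base and derived
     * struct share the address of the enclosing object, so the downcast
     * is valid C whenever the object really is an si_context: */
    static inline struct si_context *
    si_context_from_pipe(struct pipe_context *pctx)
    {
            return (struct si_context *)pctx;
    }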

diff --git a/src/gallium/drivers/radeon/r600_buffer_common.c b/src/gallium/drivers/radeon/r600_buffer_common.c
index 3789bccd6d466f9e85de73f9b1431028e2851f68..8a4ad2dc6db63653c4dd8b874efea00136dc1b70 100644
@@ -46,6 +46,7 @@ void *si_buffer_map_sync_with_rings(struct r600_common_context *ctx,
                                    struct r600_resource *resource,
                                    unsigned usage)
 {
+       struct si_context *sctx = (struct si_context*)ctx;
        enum radeon_bo_usage rusage = RADEON_USAGE_READWRITE;
        bool busy = false;
 
@@ -75,10 +76,10 @@ void *si_buffer_map_sync_with_rings(struct r600_common_context *ctx,
            ctx->ws->cs_is_buffer_referenced(ctx->dma_cs,
                                             resource->buf, rusage)) {
                if (usage & PIPE_TRANSFER_DONTBLOCK) {
-                       si_flush_dma_cs(ctx, PIPE_FLUSH_ASYNC, NULL);
+                       si_flush_dma_cs(sctx, PIPE_FLUSH_ASYNC, NULL);
                        return NULL;
                } else {
-                       si_flush_dma_cs(ctx, 0, NULL);
+                       si_flush_dma_cs(sctx, 0, NULL);
                        busy = true;
                }
        }
diff --git a/src/gallium/drivers/radeon/r600_pipe_common.c b/src/gallium/drivers/radeon/r600_pipe_common.c
index afea9484a24dd8ae7ade22c01e610cd9666f824d..0db88729dcc7aedea671e8930c6087dce1b20de2 100644
@@ -79,7 +79,7 @@ static bool r600_resource_commit(struct pipe_context *pctx,
                                 unsigned level, struct pipe_box *box,
                                 bool commit)
 {
-       struct r600_common_context *ctx = (struct r600_common_context *)pctx;
+       struct si_context *ctx = (struct si_context *)pctx;
        struct r600_resource *res = r600_resource(resource);
 
        /*
@@ -89,23 +89,23 @@ static bool r600_resource_commit(struct pipe_context *pctx,
         * (b) wait for threaded submit to finish, including those that were
         *     triggered by some other, earlier operation.
         */
-       if (radeon_emitted(ctx->gfx_cs, ctx->initial_gfx_cs_size) &&
-           ctx->ws->cs_is_buffer_referenced(ctx->gfx_cs,
-                                            res->buf, RADEON_USAGE_READWRITE)) {
+       if (radeon_emitted(ctx->b.gfx_cs, ctx->b.initial_gfx_cs_size) &&
+           ctx->b.ws->cs_is_buffer_referenced(ctx->b.gfx_cs,
+                                              res->buf, RADEON_USAGE_READWRITE)) {
                si_flush_gfx_cs(ctx, PIPE_FLUSH_ASYNC, NULL);
        }
-       if (radeon_emitted(ctx->dma_cs, 0) &&
-           ctx->ws->cs_is_buffer_referenced(ctx->dma_cs,
-                                            res->buf, RADEON_USAGE_READWRITE)) {
+       if (radeon_emitted(ctx->b.dma_cs, 0) &&
+           ctx->b.ws->cs_is_buffer_referenced(ctx->b.dma_cs,
+                                              res->buf, RADEON_USAGE_READWRITE)) {
                si_flush_dma_cs(ctx, PIPE_FLUSH_ASYNC, NULL);
        }
 
-       ctx->ws->cs_sync_flush(ctx->dma_cs);
-       ctx->ws->cs_sync_flush(ctx->gfx_cs);
+       ctx->b.ws->cs_sync_flush(ctx->b.dma_cs);
+       ctx->b.ws->cs_sync_flush(ctx->b.gfx_cs);
 
        assert(resource->target == PIPE_BUFFER);
 
-       return ctx->ws->buffer_commit(res->buf, box->x, box->width, commit);
+       return ctx->b.ws->buffer_commit(res->buf, box->x, box->width, commit);
 }
 
 bool si_common_context_init(struct r600_common_context *rctx,
@@ -175,7 +175,7 @@ bool si_common_context_init(struct r600_common_context *rctx,
 
        if (sscreen->info.num_sdma_rings && !(sscreen->debug_flags & DBG(NO_ASYNC_DMA))) {
                rctx->dma_cs = rctx->ws->cs_create(rctx->ctx, RING_DMA,
-                                                  si_flush_dma_cs,
+                                                  (void*)si_flush_dma_cs,
                                                   rctx);
        }
 
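
A note on the (void*) cast in the hunk above: the winsys invokes the
flush callback registered via cs_create() through a void* context
parameter, while si_flush_dma_cs now has a typed prototype, so passing
it directly would draw an incompatible-pointer-type diagnostic. A
sketch of the mismatch, assuming a callback shape along these lines
(paraphrased, not copied from radeon_winsys.h):

    /* Approximate shape of the winsys flush callback: */
    typedef void (*cs_flush_fn)(void *ctx, unsigned flags,
                                struct pipe_fence_handle **fence);

    /* The typed prototype introduced by this patch: */
    void si_flush_dma_cs(struct si_context *ctx, unsigned flags,
                         struct pipe_fence_handle **fence);

    /* si_flush_dma_cs no longer matches cs_flush_fn exactly, hence the
     * explicit cast at the cs_create() call site. The winsys hands the
     * callback the same rctx pointer that was registered, and since
     * r600_common_context is the first member of si_context, the callee
     * still receives a valid si_context. (Calling through a mismatched
     * function-pointer type is formally undefined in ISO C, but it is
     * safe on the ABIs Mesa targets, where all object pointers share
     * one representation.) */
    cs_flush_fn flush = (cs_flush_fn)si_flush_dma_cs;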
diff --git a/src/gallium/drivers/radeonsi/cik_sdma.c b/src/gallium/drivers/radeonsi/cik_sdma.c
index 75d1927b183f833051c8def33f68811abefad45e..a5b80897a6bc5f458c0db979347fe5a87a0bd7ff 100644
@@ -47,7 +47,7 @@ static void cik_sdma_copy_buffer(struct si_context *ctx,
        src_offset += rsrc->gpu_address;
 
        ncopy = DIV_ROUND_UP(size, CIK_SDMA_COPY_MAX_SIZE);
-       si_need_dma_space(&ctx->b, ncopy * 7, rdst, rsrc);
+       si_need_dma_space(ctx, ncopy * 7, rdst, rsrc);
 
        for (i = 0; i < ncopy; i++) {
                csize = MIN2(size, CIK_SDMA_COPY_MAX_SIZE);
@@ -92,7 +92,7 @@ static void cik_sdma_clear_buffer(struct pipe_context *ctx,
 
        /* the same maximum size as for copying */
        ncopy = DIV_ROUND_UP(size, CIK_SDMA_COPY_MAX_SIZE);
-       si_need_dma_space(&sctx->b, ncopy * 5, rdst, NULL);
+       si_need_dma_space(sctx, ncopy * 5, rdst, NULL);
 
        for (i = 0; i < ncopy; i++) {
                csize = MIN2(size, CIK_SDMA_COPY_MAX_SIZE);
@@ -232,7 +232,7 @@ static bool cik_sdma_copy_texture(struct si_context *sctx,
              srcy + copy_height != (1 << 14)))) {
                struct radeon_winsys_cs *cs = sctx->b.dma_cs;
 
-               si_need_dma_space(&sctx->b, 13, &rdst->resource, &rsrc->resource);
+               si_need_dma_space(sctx, 13, &rdst->resource, &rsrc->resource);
 
                radeon_emit(cs, CIK_SDMA_PACKET(CIK_SDMA_OPCODE_COPY,
                                                CIK_SDMA_COPY_SUB_OPCODE_LINEAR_SUB_WINDOW, 0) |
@@ -395,7 +395,7 @@ static bool cik_sdma_copy_texture(struct si_context *sctx,
                        struct radeon_winsys_cs *cs = sctx->b.dma_cs;
                        uint32_t direction = linear == rdst ? 1u << 31 : 0;
 
-                       si_need_dma_space(&sctx->b, 14, &rdst->resource, &rsrc->resource);
+                       si_need_dma_space(sctx, 14, &rdst->resource, &rsrc->resource);
 
                        radeon_emit(cs, CIK_SDMA_PACKET(CIK_SDMA_OPCODE_COPY,
                                                        CIK_SDMA_COPY_SUB_OPCODE_TILED_SUB_WINDOW, 0) |
@@ -489,7 +489,7 @@ static bool cik_sdma_copy_texture(struct si_context *sctx,
                      dstx + copy_width != (1 << 14)))) {
                        struct radeon_winsys_cs *cs = sctx->b.dma_cs;
 
-                       si_need_dma_space(&sctx->b, 15, &rdst->resource, &rsrc->resource);
+                       si_need_dma_space(sctx, 15, &rdst->resource, &rsrc->resource);
 
                        radeon_emit(cs, CIK_SDMA_PACKET(CIK_SDMA_OPCODE_COPY,
                                                        CIK_SDMA_COPY_SUB_OPCODE_T2T_SUB_WINDOW, 0));
diff --git a/src/gallium/drivers/radeonsi/si_debug.c b/src/gallium/drivers/radeonsi/si_debug.c
index bd210af1bd591254cfa82b4a6ab5b1c649ed3fb9..a8dc47e2a995c612946cc4dd14a565d0d1cec1e8 100644
@@ -1108,10 +1108,9 @@ static void si_dump_dma(struct si_context *sctx,
        fprintf(f, "SDMA Dump Done.\n");
 }
 
-void si_check_vm_faults(struct r600_common_context *ctx,
+void si_check_vm_faults(struct si_context *sctx,
                        struct radeon_saved_cs *saved, enum ring_type ring)
 {
-       struct si_context *sctx = (struct si_context *)ctx;
        struct pipe_screen *screen = sctx->b.b.screen;
        FILE *f;
        uint64_t addr;
diff --git a/src/gallium/drivers/radeonsi/si_dma.c b/src/gallium/drivers/radeonsi/si_dma.c
index 46849e8e9565288165248e4239dd2ca910d18aef..5e982788fd5830dbe7cff1834340e25d0f855bf8 100644
@@ -59,7 +59,7 @@ static void si_dma_copy_buffer(struct si_context *ctx,
        }
 
        ncopy = DIV_ROUND_UP(size, max_size);
-       si_need_dma_space(&ctx->b, ncopy * 5, rdst, rsrc);
+       si_need_dma_space(ctx, ncopy * 5, rdst, rsrc);
 
        for (i = 0; i < ncopy; i++) {
                count = MIN2(size, max_size);
@@ -101,7 +101,7 @@ static void si_dma_clear_buffer(struct pipe_context *ctx,
 
        /* the same maximum size as for copying */
        ncopy = DIV_ROUND_UP(size, SI_DMA_COPY_MAX_DWORD_ALIGNED_SIZE);
-       si_need_dma_space(&sctx->b, ncopy * 4, rdst, NULL);
+       si_need_dma_space(sctx, ncopy * 4, rdst, NULL);
 
        for (i = 0; i < ncopy; i++) {
                csize = MIN2(size, SI_DMA_COPY_MAX_DWORD_ALIGNED_SIZE);
@@ -190,7 +190,7 @@ static void si_dma_copy_tile(struct si_context *ctx,
        mt = G_009910_MICRO_TILE_MODE(tile_mode);
        size = copy_height * pitch;
        ncopy = DIV_ROUND_UP(size, SI_DMA_COPY_MAX_DWORD_ALIGNED_SIZE);
-       si_need_dma_space(&ctx->b, ncopy * 9, &rdst->resource, &rsrc->resource);
+       si_need_dma_space(ctx, ncopy * 9, &rdst->resource, &rsrc->resource);
 
        for (i = 0; i < ncopy; i++) {
                cheight = copy_height;
diff --git a/src/gallium/drivers/radeonsi/si_dma_cs.c b/src/gallium/drivers/radeonsi/si_dma_cs.c
index 31e5e4a2bf4859a9912501d19da6e4e96333fdeb..91e4e871d8adb7b42679d21f94ebbe36bac2d9d6 100644
@@ -24,22 +24,22 @@
 #include "si_pipe.h"
 #include "radeon/r600_cs.h"
 
-static void si_dma_emit_wait_idle(struct r600_common_context *rctx)
+static void si_dma_emit_wait_idle(struct si_context *sctx)
 {
-       struct radeon_winsys_cs *cs = rctx->dma_cs;
+       struct radeon_winsys_cs *cs = sctx->b.dma_cs;
 
        /* NOP waits for idle on Evergreen and later. */
-       if (rctx->chip_class >= CIK)
+       if (sctx->b.chip_class >= CIK)
                radeon_emit(cs, 0x00000000); /* NOP */
        else
                radeon_emit(cs, 0xf0000000); /* NOP */
 }
 
-void si_need_dma_space(struct r600_common_context *ctx, unsigned num_dw,
+void si_need_dma_space(struct si_context *ctx, unsigned num_dw,
                       struct r600_resource *dst, struct r600_resource *src)
 {
-       uint64_t vram = ctx->dma_cs->used_vram;
-       uint64_t gtt = ctx->dma_cs->used_gart;
+       uint64_t vram = ctx->b.dma_cs->used_vram;
+       uint64_t gtt = ctx->b.dma_cs->used_gart;
 
        if (dst) {
                vram += dst->vram_usage;
@@ -51,13 +51,13 @@ void si_need_dma_space(struct r600_common_context *ctx, unsigned num_dw,
        }
 
        /* Flush the GFX IB if DMA depends on it. */
-       if (radeon_emitted(ctx->gfx_cs, ctx->initial_gfx_cs_size) &&
+       if (radeon_emitted(ctx->b.gfx_cs, ctx->b.initial_gfx_cs_size) &&
            ((dst &&
-             ctx->ws->cs_is_buffer_referenced(ctx->gfx_cs, dst->buf,
-                                              RADEON_USAGE_READWRITE)) ||
+             ctx->b.ws->cs_is_buffer_referenced(ctx->b.gfx_cs, dst->buf,
+                                                RADEON_USAGE_READWRITE)) ||
             (src &&
-             ctx->ws->cs_is_buffer_referenced(ctx->gfx_cs, src->buf,
-                                              RADEON_USAGE_WRITE))))
+             ctx->b.ws->cs_is_buffer_referenced(ctx->b.gfx_cs, src->buf,
+                                                RADEON_USAGE_WRITE))))
                si_flush_gfx_cs(ctx, PIPE_FLUSH_ASYNC, NULL);
 
        /* Flush if there's not enough space, or if the memory usage per IB
        /* Flush if there's not enough space, or if the memory usage per IB
@@ -73,66 +73,66 @@ void si_need_dma_space(struct r600_common_context *ctx, unsigned num_dw,
         * engine busy while uploads are being submitted.
         */
        num_dw++; /* for emit_wait_idle below */
-       if (!ctx->ws->cs_check_space(ctx->dma_cs, num_dw) ||
-           ctx->dma_cs->used_vram + ctx->dma_cs->used_gart > 64 * 1024 * 1024 ||
-           !radeon_cs_memory_below_limit(ctx->screen, ctx->dma_cs, vram, gtt)) {
+       if (!ctx->b.ws->cs_check_space(ctx->b.dma_cs, num_dw) ||
+           ctx->b.dma_cs->used_vram + ctx->b.dma_cs->used_gart > 64 * 1024 * 1024 ||
+           !radeon_cs_memory_below_limit(ctx->screen, ctx->b.dma_cs, vram, gtt)) {
                si_flush_dma_cs(ctx, PIPE_FLUSH_ASYNC, NULL);
-               assert((num_dw + ctx->dma_cs->current.cdw) <= ctx->dma_cs->current.max_dw);
+               assert((num_dw + ctx->b.dma_cs->current.cdw) <= ctx->b.dma_cs->current.max_dw);
        }
 
        /* Wait for idle if either buffer has been used in the IB before to
         * prevent read-after-write hazards.
         */
        if ((dst &&
-            ctx->ws->cs_is_buffer_referenced(ctx->dma_cs, dst->buf,
-                                             RADEON_USAGE_READWRITE)) ||
+            ctx->b.ws->cs_is_buffer_referenced(ctx->b.dma_cs, dst->buf,
+                                               RADEON_USAGE_READWRITE)) ||
            (src &&
-            ctx->ws->cs_is_buffer_referenced(ctx->dma_cs, src->buf,
-                                             RADEON_USAGE_WRITE)))
+            ctx->b.ws->cs_is_buffer_referenced(ctx->b.dma_cs, src->buf,
+                                               RADEON_USAGE_WRITE)))
                si_dma_emit_wait_idle(ctx);
 
        if (dst) {
-               radeon_add_to_buffer_list(ctx, ctx->dma_cs, dst,
+               radeon_add_to_buffer_list(&ctx->b, ctx->b.dma_cs, dst,
                                          RADEON_USAGE_WRITE,
                                          RADEON_PRIO_SDMA_BUFFER);
        }
        if (src) {
-               radeon_add_to_buffer_list(ctx, ctx->dma_cs, src,
+               radeon_add_to_buffer_list(&ctx->b, ctx->b.dma_cs, src,
                                          RADEON_USAGE_READ,
                                          RADEON_PRIO_SDMA_BUFFER);
        }
 
        /* this function is called before all DMA calls, so increment this. */
-       ctx->num_dma_calls++;
+       ctx->b.num_dma_calls++;
 }
 
-void si_flush_dma_cs(void *ctx, unsigned flags, struct pipe_fence_handle **fence)
+void si_flush_dma_cs(struct si_context *ctx, unsigned flags,
+                    struct pipe_fence_handle **fence)
 {
-       struct r600_common_context *rctx = (struct r600_common_context *)ctx;
-       struct radeon_winsys_cs *cs = rctx->dma_cs;
+       struct radeon_winsys_cs *cs = ctx->b.dma_cs;
        struct radeon_saved_cs saved;
-       bool check_vm = (rctx->screen->debug_flags & DBG(CHECK_VM));
+       bool check_vm = (ctx->screen->debug_flags & DBG(CHECK_VM)) != 0;
 
        if (!radeon_emitted(cs, 0)) {
                if (fence)
-                       rctx->ws->fence_reference(fence, rctx->last_sdma_fence);
+                       ctx->b.ws->fence_reference(fence, ctx->b.last_sdma_fence);
                return;
        }
 
        if (check_vm)
-               si_save_cs(rctx->ws, cs, &saved, true);
+               si_save_cs(ctx->b.ws, cs, &saved, true);
 
-       rctx->ws->cs_flush(cs, flags, &rctx->last_sdma_fence);
+       ctx->b.ws->cs_flush(cs, flags, &ctx->b.last_sdma_fence);
        if (fence)
-               rctx->ws->fence_reference(fence, rctx->last_sdma_fence);
+               ctx->b.ws->fence_reference(fence, ctx->b.last_sdma_fence);
 
        if (check_vm) {
                /* Use conservative timeout 800ms, after which we won't wait any
                 * longer and assume the GPU is hung.
                 */
-               rctx->ws->fence_wait(rctx->ws, rctx->last_sdma_fence, 800*1000*1000);
+               ctx->b.ws->fence_wait(ctx->b.ws, ctx->b.last_sdma_fence, 800*1000*1000);
 
-               si_check_vm_faults(rctx, &saved, RING_DMA);
+               si_check_vm_faults(ctx, &saved, RING_DMA);
                si_clear_saved_cs(&saved);
        }
 }
@@ -140,10 +140,10 @@ void si_flush_dma_cs(void *ctx, unsigned flags, struct pipe_fence_handle **fence
 void si_screen_clear_buffer(struct si_screen *sscreen, struct pipe_resource *dst,
                            uint64_t offset, uint64_t size, unsigned value)
 {
-       struct r600_common_context *rctx = (struct r600_common_context*)sscreen->aux_context;
+       struct si_context *ctx = (struct si_context*)sscreen->aux_context;
 
        mtx_lock(&sscreen->aux_context_lock);
-       rctx->dma_clear_buffer(&rctx->b, dst, offset, size, value);
+       ctx->b.dma_clear_buffer(&ctx->b.b, dst, offset, size, value);
        sscreen->aux_context->flush(sscreen->aux_context, NULL, 0);
        mtx_unlock(&sscreen->aux_context_lock);
 }
diff --git a/src/gallium/drivers/radeonsi/si_fence.c b/src/gallium/drivers/radeonsi/si_fence.c
index 6380a1b246b0e31f035fa03c867e640a16a270e7..3522244a01689847ed641c4239a6a2fe63f95d1e 100644
@@ -46,7 +46,7 @@ struct si_multi_fence {
 
        /* If the context wasn't flushed at fence creation, this is non-NULL. */
        struct {
-               struct r600_common_context *ctx;
+               struct si_context *ctx;
                unsigned ib_index;
        } gfx_unflushed;
 
@@ -174,14 +174,14 @@ void si_gfx_wait_fence(struct r600_common_context *ctx,
        radeon_emit(cs, 4); /* poll interval */
 }
 
-static void si_add_fence_dependency(struct r600_common_context *rctx,
+static void si_add_fence_dependency(struct si_context *sctx,
                                    struct pipe_fence_handle *fence)
 {
-       struct radeon_winsys *ws = rctx->ws;
+       struct radeon_winsys *ws = sctx->b.ws;
 
-       if (rctx->dma_cs)
-               ws->cs_add_fence_dependency(rctx->dma_cs, fence);
-       ws->cs_add_fence_dependency(rctx->gfx_cs, fence);
+       if (sctx->b.dma_cs)
+               ws->cs_add_fence_dependency(sctx->b.dma_cs, fence);
+       ws->cs_add_fence_dependency(sctx->b.gfx_cs, fence);
 }
 
 static void si_add_syncobj_signal(struct r600_common_context *rctx,
@@ -351,7 +351,7 @@ static boolean si_fence_finish(struct pipe_screen *screen,
                struct si_context *sctx;
 
                sctx = (struct si_context *)threaded_context_unwrap_unsync(ctx);
-               if (rfence->gfx_unflushed.ctx == &sctx->b &&
+               if (rfence->gfx_unflushed.ctx == sctx &&
                    rfence->gfx_unflushed.ib_index == sctx->b.num_gfx_cs_flushes) {
                        /* Section 4.1.2 (Signaling) of the OpenGL 4.6 (Core profile)
                         * spec says:
@@ -496,8 +496,8 @@ static void si_flush_from_st(struct pipe_context *ctx,
                             unsigned flags)
 {
        struct pipe_screen *screen = ctx->screen;
-       struct r600_common_context *rctx = (struct r600_common_context *)ctx;
-       struct radeon_winsys *ws = rctx->ws;
+       struct si_context *sctx = (struct si_context *)ctx;
+       struct radeon_winsys *ws = sctx->b.ws;
        struct pipe_fence_handle *gfx_fence = NULL;
        struct pipe_fence_handle *sdma_fence = NULL;
        bool deferred_fence = false;
@@ -511,18 +511,18 @@ static void si_flush_from_st(struct pipe_context *ctx,
                assert(flags & PIPE_FLUSH_DEFERRED);
                assert(fence);
 
-               si_fine_fence_set((struct si_context *)rctx, &fine, flags);
+               si_fine_fence_set(sctx, &fine, flags);
        }
 
        /* DMA IBs are preambles to gfx IBs, therefore must be flushed first. */
-       if (rctx->dma_cs)
-               si_flush_dma_cs(rctx, rflags, fence ? &sdma_fence : NULL);
+       if (sctx->b.dma_cs)
+               si_flush_dma_cs(sctx, rflags, fence ? &sdma_fence : NULL);
 
-       if (!radeon_emitted(rctx->gfx_cs, rctx->initial_gfx_cs_size)) {
+       if (!radeon_emitted(sctx->b.gfx_cs, sctx->b.initial_gfx_cs_size)) {
                if (fence)
-                       ws->fence_reference(&gfx_fence, rctx->last_gfx_fence);
+                       ws->fence_reference(&gfx_fence, sctx->b.last_gfx_fence);
                if (!(flags & PIPE_FLUSH_DEFERRED))
-                       ws->cs_sync_flush(rctx->gfx_cs);
+                       ws->cs_sync_flush(sctx->b.gfx_cs);
        } else {
                /* Instead of flushing, create a deferred fence. Constraints:
                 * - The state tracker must allow a deferred flush.
@@ -533,10 +533,10 @@ static void si_flush_from_st(struct pipe_context *ctx,
                if (flags & PIPE_FLUSH_DEFERRED &&
                    !(flags & PIPE_FLUSH_FENCE_FD) &&
                    fence) {
-                       gfx_fence = rctx->ws->cs_get_next_fence(rctx->gfx_cs);
+                       gfx_fence = sctx->b.ws->cs_get_next_fence(sctx->b.gfx_cs);
                        deferred_fence = true;
                } else {
-                       si_flush_gfx_cs(rctx, rflags, fence ? &gfx_fence : NULL);
+                       si_flush_gfx_cs(sctx, rflags, fence ? &gfx_fence : NULL);
                }
        }
 
@@ -564,8 +564,8 @@ static void si_flush_from_st(struct pipe_context *ctx,
                multi_fence->sdma = sdma_fence;
 
                if (deferred_fence) {
-                       multi_fence->gfx_unflushed.ctx = rctx;
-                       multi_fence->gfx_unflushed.ib_index = rctx->num_gfx_cs_flushes;
+                       multi_fence->gfx_unflushed.ctx = sctx;
+                       multi_fence->gfx_unflushed.ib_index = sctx->b.num_gfx_cs_flushes;
                }
 
                multi_fence->fine = fine;
@@ -579,9 +579,9 @@ static void si_flush_from_st(struct pipe_context *ctx,
        assert(!fine.buf);
 finish:
        if (!(flags & PIPE_FLUSH_DEFERRED)) {
-               if (rctx->dma_cs)
-                       ws->cs_sync_flush(rctx->dma_cs);
-               ws->cs_sync_flush(rctx->gfx_cs);
+               if (sctx->b.dma_cs)
+                       ws->cs_sync_flush(sctx->b.dma_cs);
+               ws->cs_sync_flush(sctx->b.gfx_cs);
        }
 }
 
@@ -615,14 +615,14 @@ static void si_fence_server_signal(struct pipe_context *ctx,
 static void si_fence_server_sync(struct pipe_context *ctx,
                                 struct pipe_fence_handle *fence)
 {
-       struct r600_common_context *rctx = (struct r600_common_context *)ctx;
+       struct si_context *sctx = (struct si_context *)ctx;
        struct si_multi_fence *rfence = (struct si_multi_fence *)fence;
 
        util_queue_fence_wait(&rfence->ready);
 
        /* Unflushed fences from the same context are no-ops. */
        if (rfence->gfx_unflushed.ctx &&
-           rfence->gfx_unflushed.ctx == rctx)
+           rfence->gfx_unflushed.ctx == sctx)
                return;
 
        /* All unflushed commands will not start execution before
@@ -633,9 +633,9 @@ static void si_fence_server_sync(struct pipe_context *ctx,
        si_flush_from_st(ctx, NULL, PIPE_FLUSH_ASYNC);
 
        if (rfence->sdma)
-               si_add_fence_dependency(rctx, rfence->sdma);
+               si_add_fence_dependency(sctx, rfence->sdma);
        if (rfence->gfx)
-               si_add_fence_dependency(rctx, rfence->gfx);
+               si_add_fence_dependency(sctx, rfence->gfx);
 }
 
 void si_init_fence_functions(struct si_context *ctx)
diff --git a/src/gallium/drivers/radeonsi/si_gfx_cs.c b/src/gallium/drivers/radeonsi/si_gfx_cs.c
index 685ba1234b5c82b52e4253a4c46e9e094ee1aa00..203d7704f36b32f49f56be4045691440a57bd196 100644
@@ -139,7 +139,7 @@ void si_flush_gfx_cs(void *context, unsigned flags,
                 */
                ctx->b.ws->fence_wait(ctx->b.ws, ctx->b.last_gfx_fence, 800*1000*1000);
 
-               si_check_vm_faults(&ctx->b, &ctx->current_saved_cs->gfx, RING_GFX);
+               si_check_vm_faults(ctx, &ctx->current_saved_cs->gfx, RING_GFX);
        }
 
        if (ctx->current_saved_cs)
diff --git a/src/gallium/drivers/radeonsi/si_pipe.h b/src/gallium/drivers/radeonsi/si_pipe.h
index 1c6e10f7dc92b20ef17b14a9b157b6dbc19b5efa..91300113e2ceca7a8e537fd47278183c34709d30 100644
@@ -714,7 +714,7 @@ void si_log_hw_flush(struct si_context *sctx);
 void si_log_draw_state(struct si_context *sctx, struct u_log_context *log);
 void si_log_compute_state(struct si_context *sctx, struct u_log_context *log);
 void si_init_debug_functions(struct si_context *sctx);
-void si_check_vm_faults(struct r600_common_context *ctx,
+void si_check_vm_faults(struct si_context *sctx,
                        struct radeon_saved_cs *saved, enum ring_type ring);
 bool si_replace_shader(unsigned num, struct ac_shader_binary *binary);
 
@@ -722,9 +722,10 @@ bool si_replace_shader(unsigned num, struct ac_shader_binary *binary);
 void si_init_dma_functions(struct si_context *sctx);
 
 /* si_dma_cs.c */
-void si_need_dma_space(struct r600_common_context *ctx, unsigned num_dw,
+void si_need_dma_space(struct si_context *ctx, unsigned num_dw,
                       struct r600_resource *dst, struct r600_resource *src);
-void si_flush_dma_cs(void *ctx, unsigned flags, struct pipe_fence_handle **fence);
+void si_flush_dma_cs(struct si_context *ctx, unsigned flags,
+                    struct pipe_fence_handle **fence);
 void si_screen_clear_buffer(struct si_screen *sscreen, struct pipe_resource *dst,
                            uint64_t offset, uint64_t size, unsigned value);
 