r600: add some missing cayman register defines
[mesa.git] / src / gallium / drivers / freedreno / freedreno_context.c
index 8a86f0be149f1392260a083d4947b586ed034f11..0ec81f882daf7341ceb88097a5323075225f3246 100644 (file)
 #include "util/u_upload_mgr.h"
 
 static void
-fd_context_flush(struct pipe_context *pctx, struct pipe_fence_handle **fence,
+fd_context_flush(struct pipe_context *pctx, struct pipe_fence_handle **fencep,
                unsigned flags)
 {
        struct fd_context *ctx = fd_context(pctx);
+       struct pipe_fence_handle *fence = NULL;
+
+       /* Take a ref to the batch's fence (batch can be unref'd when flushed): */
+       fd_fence_ref(pctx->screen, &fence, ctx->batch->fence);
 
        if (flags & PIPE_FLUSH_FENCE_FD)
                ctx->batch->needs_out_fence_fd = true;
 
        if (!ctx->screen->reorder) {
-               fd_batch_flush(ctx->batch, true);
+               fd_batch_flush(ctx->batch, true, false);
+       } else if (flags & PIPE_FLUSH_DEFERRED) {
+               fd_bc_flush_deferred(&ctx->screen->batch_cache, ctx);
        } else {
                fd_bc_flush(&ctx->screen->batch_cache, ctx);
        }
 
-       if (fence) {
-               /* if there hasn't been any rendering submitted yet, we might not
-                * have actually created a fence
-                */
-               if (!ctx->last_fence || ctx->batch->needs_out_fence_fd) {
-                       ctx->batch->needs_flush = true;
-                       fd_gmem_render_noop(ctx->batch);
-                       fd_batch_reset(ctx->batch);
-               }
-               fd_fence_ref(pctx->screen, fence, ctx->last_fence);
-       }
+       if (fencep)
+               fd_fence_ref(pctx->screen, fencep, fence);
+
+       fd_fence_ref(pctx->screen, &fence, NULL);
+}
+
+static void
+fd_texture_barrier(struct pipe_context *pctx, unsigned flags)
+{
+       /* On devices that could sample from GMEM we could possibly do better.
+        * Or if we knew that we were doing GMEM bypass we could just emit a
+        * cache flush, perhaps?  But we don't know if future draws would cause
+        * us to use GMEM, and a flush in bypass isn't the end of the world.
+        */
+       fd_context_flush(pctx, NULL, 0);
 }
 
 /**
@@ -118,8 +128,6 @@ fd_context_destroy(struct pipe_context *pctx)
        fd_batch_reference(&ctx->batch, NULL);  /* unref current batch */
        fd_bc_invalidate_context(ctx);
 
-       fd_fence_ref(pctx->screen, &ctx->last_fence, NULL);
-
        fd_prog_fini(pctx);
 
        if (ctx->blitter)
@@ -136,14 +144,15 @@ fd_context_destroy(struct pipe_context *pctx)
 
        slab_destroy_child(&ctx->transfer_pool);
 
-       for (i = 0; i < ARRAY_SIZE(ctx->pipe); i++) {
-               struct fd_vsc_pipe *pipe = &ctx->pipe[i];
+       for (i = 0; i < ARRAY_SIZE(ctx->vsc_pipe); i++) {
+               struct fd_vsc_pipe *pipe = &ctx->vsc_pipe[i];
                if (!pipe->bo)
                        break;
                fd_bo_del(pipe->bo);
        }
 
        fd_device_del(ctx->dev);
+       fd_pipe_del(ctx->pipe);
 
        if (fd_mesa_debug & (FD_DBG_BSTAT | FD_DBG_MSGS)) {
                printf("batch_total=%u, batch_sysmem=%u, batch_gmem=%u, batch_restore=%u\n",
@@ -244,13 +253,14 @@ fd_context_cleanup_common_vbos(struct fd_context *ctx)
 
 struct pipe_context *
 fd_context_init(struct fd_context *ctx, struct pipe_screen *pscreen,
-               const uint8_t *primtypes, void *priv)
+               const uint8_t *primtypes, void *priv, unsigned flags)
 {
        struct fd_screen *screen = fd_screen(pscreen);
        struct pipe_context *pctx;
        int i;
 
        ctx->screen = screen;
+       ctx->pipe = fd_pipe_new(screen->dev, FD_PIPE_3D);
 
        ctx->primtypes = primtypes;
        ctx->primtype_mask = 0;
@@ -271,19 +281,14 @@ fd_context_init(struct fd_context *ctx, struct pipe_screen *pscreen,
        pctx->set_debug_callback = fd_set_debug_callback;
        pctx->create_fence_fd = fd_create_fence_fd;
        pctx->fence_server_sync = fd_fence_server_sync;
+       pctx->texture_barrier = fd_texture_barrier;
 
        pctx->stream_uploader = u_upload_create_default(pctx);
        if (!pctx->stream_uploader)
                goto fail;
        pctx->const_uploader = pctx->stream_uploader;
 
-       /* TODO what about compute?  Ideally it creates it's own independent
-        * batches per compute job (since it isn't using tiling, so no point
-        * in getting involved with the re-ordering madness)..
-        */
-       if (!screen->reorder) {
-               ctx->batch = fd_bc_alloc_batch(&screen->batch_cache, ctx);
-       }
+       ctx->batch = fd_bc_alloc_batch(&screen->batch_cache, ctx);
 
        slab_create_child(&ctx->transfer_pool, &screen->transfer_pool);