freedreno/ir3: debug cleanup
[mesa.git] / src / gallium / drivers / freedreno / freedreno_context.c
index 3da058dcdefd04b3e39638e8ec0fe23c992c8670..b2ac396d9d110547d001a6fecc4f0fd22d6f22e1 100644 (file)
@@ -1,5 +1,3 @@
-/* -*- mode: C; c-file-style: "k&r"; tab-width 4; indent-tabs-mode: t; -*- */
-
 /*
  * Copyright (C) 2012 Rob Clark <robclark@freedesktop.org>
  *
@@ -27,6 +25,7 @@
  */
 
 #include "freedreno_context.h"
+#include "freedreno_blitter.h"
 #include "freedreno_draw.h"
 #include "freedreno_fence.h"
 #include "freedreno_program.h"
@@ -45,30 +44,59 @@ fd_context_flush(struct pipe_context *pctx, struct pipe_fence_handle **fencep,
 {
        struct fd_context *ctx = fd_context(pctx);
        struct pipe_fence_handle *fence = NULL;
+       // TODO we want to look up the batch if it exists, but not create one if it doesn't.
+       struct fd_batch *batch = fd_context_batch(ctx);
+
+       DBG("%p: flush: flags=%x\n", ctx->batch, flags);
+
+       /* if there has been no rendering since the last flush, i.e. the app
+        * just decided it needed a fence, re-use the last one:
+        */
+       if (ctx->last_fence) {
+               fd_fence_ref(&fence, ctx->last_fence);
+               goto out;
+       }
+
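+       /* no batch (and no last_fence) means nothing has been rendered, so
+        * there is nothing to flush:
+        */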
+       if (!batch)
+               return;
 
        /* Take a ref to the batch's fence (batch can be unref'd when flushed): */
-       fd_fence_ref(pctx->screen, &fence, ctx->batch->fence);
+       fd_fence_ref(&fence, batch->fence);
 
-       if (flags & PIPE_FLUSH_FENCE_FD)
-               ctx->batch->needs_out_fence_fd = true;
+       /* TODO is it worth trying to figure out if the app is using fence fds,
+        * to avoid requesting one for every batch?
+        */
+       batch->needs_out_fence_fd = true;
 
        if (!ctx->screen->reorder) {
-               fd_batch_flush(ctx->batch, true, false);
+               fd_batch_flush(batch, true);
        } else if (flags & PIPE_FLUSH_DEFERRED) {
                fd_bc_flush_deferred(&ctx->screen->batch_cache, ctx);
        } else {
                fd_bc_flush(&ctx->screen->batch_cache, ctx);
        }
 
+out:
        if (fencep)
-               fd_fence_ref(pctx->screen, fencep, fence);
+               fd_fence_ref(fencep, fence);
+
+       fd_fence_ref(&ctx->last_fence, fence);
 
-       fd_fence_ref(pctx->screen, &fence, NULL);
+       fd_fence_ref(&fence, NULL);
 }
 
 static void
 fd_texture_barrier(struct pipe_context *pctx, unsigned flags)
 {
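+       /* prefer the generation-specific framebuffer barrier, where one is
+        * implemented; it can be much cheaper than a full flush:
+        */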
+       if (flags == PIPE_TEXTURE_BARRIER_FRAMEBUFFER) {
+               struct fd_context *ctx = fd_context(pctx);
+
+               if (ctx->framebuffer_barrier) {
+                       ctx->framebuffer_barrier(ctx);
+                       return;
+               }
+       }
+
        /* On devices that could sample from GMEM we could possibly do better.
         * Or if we knew that we were doing GMEM bypass we could just emit a
         * cache flush, perhaps?  But we don't know if future draws would cause
@@ -77,6 +105,16 @@ fd_texture_barrier(struct pipe_context *pctx, unsigned flags)
        fd_context_flush(pctx, NULL, 0);
 }
 
+static void
+fd_memory_barrier(struct pipe_context *pctx, unsigned flags)
+{
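+       /* a barrier covering only buffer/texture updates needs no flush,
+        * presumably because those update paths are already ordered by the
+        * driver's resource tracking:
+        */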
+       if (!(flags & ~PIPE_BARRIER_UPDATE))
+               return;
+
+       fd_context_flush(pctx, NULL, 0);
+       /* TODO do we need to check for persistently mapped buffers and fd_bo_cpu_prep()? */
+}
+
 /**
  * emit marker string as payload of a no-op packet, which can be
  * decoded by cffdump.
@@ -91,6 +129,8 @@ fd_emit_string_marker(struct pipe_context *pctx, const char *string, int len)
        if (!ctx->batch)
                return;
 
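+       /* mark the batch as needing flush, so the no-op packet is not
+        * dropped along with an otherwise empty batch:
+        */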
+       ctx->batch->needs_flush = true;
+
        ring = ctx->batch->draw;
 
        /* max packet size is 0x3fff dwords: */
@@ -122,9 +162,12 @@ fd_context_destroy(struct pipe_context *pctx)
 
        DBG("");
 
+       fd_fence_ref(&ctx->last_fence, NULL);
+
        if (ctx->screen->reorder && util_queue_is_initialized(&ctx->flush_queue))
                util_queue_destroy(&ctx->flush_queue);
 
+       util_copy_framebuffer_state(&ctx->framebuffer, NULL);
        fd_batch_reference(&ctx->batch, NULL);  /* unref current batch */
        fd_bc_invalidate_context(ctx);
 
@@ -160,8 +203,6 @@ fd_context_destroy(struct pipe_context *pctx)
                        (uint32_t)ctx->stats.batch_gmem, (uint32_t)ctx->stats.batch_nondraw,
                        (uint32_t)ctx->stats.batch_restore);
        }
-
-       FREE(ctx);
 }
 
 static void
@@ -176,6 +217,39 @@ fd_set_debug_callback(struct pipe_context *pctx,
                memset(&ctx->debug, 0, sizeof(ctx->debug));
 }
 
+static uint32_t
+fd_get_reset_count(struct fd_context *ctx, bool per_context)
+{
+       uint64_t val;
+       enum fd_param_id param =
+               per_context ? FD_CTX_FAULTS : FD_GLOBAL_FAULTS;
+       int ret = fd_pipe_get_param(ctx->pipe, param, &val);
+       debug_assert(!ret);
+       return val;
+}
+
+static enum pipe_reset_status
+fd_get_device_reset_status(struct pipe_context *pctx)
+{
+       struct fd_context *ctx = fd_context(pctx);
+       int context_faults = fd_get_reset_count(ctx, true);
+       int global_faults  = fd_get_reset_count(ctx, false);
+       enum pipe_reset_status status;
+
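+       /* a bumped per-context fault count means this context was to blame;
+        * if only the global count moved, some other context faulted:
+        */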
+       if (context_faults != ctx->context_reset_count) {
+               status = PIPE_GUILTY_CONTEXT_RESET;
+       } else if (global_faults != ctx->global_reset_count) {
+               status = PIPE_INNOCENT_CONTEXT_RESET;
+       } else {
+               status = PIPE_NO_RESET;
+       }
+
+       ctx->context_reset_count = context_faults;
+       ctx->global_reset_count = global_faults;
+
+       return status;
+}
+
 /* TODO we could combine a few of these small buffers (solid_vbuf,
  * blit_texcoord_vbuf, and vsc_size_mem) into a single buffer and
  * save a tiny bit of memory
@@ -258,10 +332,24 @@ fd_context_init(struct fd_context *ctx, struct pipe_screen *pscreen,
 {
        struct fd_screen *screen = fd_screen(pscreen);
        struct pipe_context *pctx;
+       unsigned prio = 1;
        int i;
 
+       /* lower numerical value == higher priority: */
+       if (fd_mesa_debug & FD_DBG_HIPRIO)
+               prio = 0;
+       else if (flags & PIPE_CONTEXT_HIGH_PRIORITY)
+               prio = 0;
+       else if (flags & PIPE_CONTEXT_LOW_PRIORITY)
+               prio = 2;
+
        ctx->screen = screen;
-       ctx->pipe = fd_pipe_new(screen->dev, FD_PIPE_3D);
+       ctx->pipe = fd_pipe_new2(screen->dev, FD_PIPE_3D, prio);
+
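+       /* take an initial snapshot of the fault counters, so that we only
+        * report resets that happen after context creation:
+        */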
+       if (fd_device_version(screen->dev) >= FD_VERSION_ROBUSTNESS) {
+               ctx->context_reset_count = fd_get_reset_count(ctx, true);
+               ctx->global_reset_count = fd_get_reset_count(ctx, false);
+       }
 
        ctx->primtypes = primtypes;
        ctx->primtype_mask = 0;
@@ -280,16 +368,19 @@ fd_context_init(struct fd_context *ctx, struct pipe_screen *pscreen,
        pctx->flush = fd_context_flush;
        pctx->emit_string_marker = fd_emit_string_marker;
        pctx->set_debug_callback = fd_set_debug_callback;
+       pctx->get_device_reset_status = fd_get_device_reset_status;
        pctx->create_fence_fd = fd_create_fence_fd;
        pctx->fence_server_sync = fd_fence_server_sync;
        pctx->texture_barrier = fd_texture_barrier;
+       pctx->memory_barrier = fd_memory_barrier;
 
        pctx->stream_uploader = u_upload_create_default(pctx);
        if (!pctx->stream_uploader)
                goto fail;
        pctx->const_uploader = pctx->stream_uploader;
 
-       ctx->batch = fd_bc_alloc_batch(&screen->batch_cache, ctx);
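+       /* with batch reordering, the batch is instead allocated on demand
+        * by fd_context_batch():
+        */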
+       if (!ctx->screen->reorder)
+               ctx->batch = fd_bc_alloc_batch(&screen->batch_cache, ctx, false);
 
        slab_create_child(&ctx->transfer_pool, &screen->transfer_pool);