-/* -*- mode: C; c-file-style: "k&r"; tab-width 4; indent-tabs-mode: t; -*- */
-
/*
* Copyright (C) 2012 Rob Clark <robclark@freedesktop.org>
*
*/
#include "freedreno_context.h"
+#include "freedreno_blitter.h"
#include "freedreno_draw.h"
#include "freedreno_fence.h"
#include "freedreno_program.h"
#include "util/u_upload_mgr.h"
static void
-fd_context_flush(struct pipe_context *pctx, struct pipe_fence_handle **fence,
+fd_context_flush(struct pipe_context *pctx, struct pipe_fence_handle **fencep,
unsigned flags)
{
struct fd_context *ctx = fd_context(pctx);
+ struct pipe_fence_handle *fence = NULL;
+ // TODO we want to look up the batch if it exists, but not create one if it doesn't.
+ struct fd_batch *batch = fd_context_batch(ctx);
+
+ DBG("%p: flush: flags=%x\n", ctx->batch, flags);
+ /* In some sequence of events, we can end up with a last_fence that is
+ * not an "fd" fence, which results in eglDupNativeFenceFDANDROID()
+ * errors.
+ */
if (flags & PIPE_FLUSH_FENCE_FD)
- ctx->batch->needs_out_fence_fd = true;
+ fd_fence_ref(&ctx->last_fence, NULL);
+
+ /* if there has been no rendering since the last flush, i.e. the app
+ * just decided it needed a fence, re-use the last one:
+ */
+ if (ctx->last_fence) {
+ fd_fence_ref(&fence, ctx->last_fence);
+ goto out;
+ }
+
+ if (!batch)
+ return;
+
+ /* Take a ref to the batch's fence (batch can be unref'd when flushed): */
+ fd_fence_ref(&fence, batch->fence);
+
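+ /* ask the kernel for an out-fence fd with the submit, so the fence
+ * can be exported (e.g. for EGL_ANDROID_native_fence_sync):
+ */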
+ if (flags & PIPE_FLUSH_FENCE_FD)
+ batch->needs_out_fence_fd = true;
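+
+ /* without batch reordering we can only flush the current batch; a
+ * deferred flush just marks the context's other batches as depending
+ * on the current one (so they get flushed when it does), and a normal
+ * flush submits all of the context's batches:
+ */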
if (!ctx->screen->reorder) {
- fd_batch_flush(ctx->batch, true);
+ fd_batch_flush(batch, true);
+ } else if (flags & PIPE_FLUSH_DEFERRED) {
+ fd_bc_flush_deferred(&ctx->screen->batch_cache, ctx);
} else {
fd_bc_flush(&ctx->screen->batch_cache, ctx);
}
- if (fence) {
- /* if there hasn't been any rendering submitted yet, we might not
- * have actually created a fence
- */
- if (!ctx->last_fence || ctx->batch->needs_out_fence_fd) {
- ctx->batch->needs_flush = true;
- fd_gmem_render_noop(ctx->batch);
- fd_batch_reset(ctx->batch);
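+
+ /* pass the fence back to the caller (if requested), remember it as
+ * last_fence, and drop the local reference:
+ */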
+out:
+ if (fencep)
+ fd_fence_ref(fencep, fence);
+
+ fd_fence_ref(&ctx->last_fence, fence);
+
+ fd_fence_ref(&fence, NULL);
+}
+
+static void
+fd_texture_barrier(struct pipe_context *pctx, unsigned flags)
+{
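+ /* use the generation-specific framebuffer-barrier hook when the
+ * backend provides one, since it can be much cheaper than a full
+ * flush:
+ */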
+ if (flags == PIPE_TEXTURE_BARRIER_FRAMEBUFFER) {
+ struct fd_context *ctx = fd_context(pctx);
+
+ if (ctx->framebuffer_barrier) {
+ ctx->framebuffer_barrier(ctx);
+ return;
}
- fd_fence_ref(pctx->screen, fence, ctx->last_fence);
}
+
+ /* On devices that could sample from GMEM we could possibly do better.
+ * Or if we knew that we were doing GMEM bypass we could just emit a
+ * cache flush, perhaps? But we don't know if future draws would cause
+ * us to use GMEM, and a flush in bypass isn't the end of the world.
+ */
+ fd_context_flush(pctx, NULL, 0);
+}
+
+static void
+fd_memory_barrier(struct pipe_context *pctx, unsigned flags)
+{
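+ /* a barrier for only PIPE_BARRIER_UPDATE_BUFFER/_TEXTURE needs no
+ * action, since resource updates are already tracked by the driver:
+ */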
+ if (!(flags & ~PIPE_BARRIER_UPDATE))
+ return;
+
+ fd_context_flush(pctx, NULL, 0);
+ /* TODO: do we need to check for persistently mapped buffers and fd_bo_cpu_prep()? */
}
/**
if (!ctx->batch)
return;
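+ /* make sure the batch carrying the marker actually gets flushed,
+ * even if nothing else is drawn:
+ */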
+ ctx->batch->needs_flush = true;
+
ring = ctx->batch->draw;
/* max packet size is 0x3fff dwords: */
DBG("");
- if (ctx->screen->reorder)
+ fd_fence_ref(&ctx->last_fence, NULL);
+
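+ /* the flush_queue is initialized on demand, so it may not exist: */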
+ if (ctx->screen->reorder && util_queue_is_initialized(&ctx->flush_queue))
util_queue_destroy(&ctx->flush_queue);
+ util_copy_framebuffer_state(&ctx->framebuffer, NULL);
fd_batch_reference(&ctx->batch, NULL); /* unref current batch */
fd_bc_invalidate_context(ctx);
- fd_fence_ref(pctx->screen, &ctx->last_fence, NULL);
-
fd_prog_fini(pctx);
- fd_hw_query_fini(pctx);
if (ctx->blitter)
util_blitter_destroy(ctx->blitter);
slab_destroy_child(&ctx->transfer_pool);
- for (i = 0; i < ARRAY_SIZE(ctx->pipe); i++) {
- struct fd_vsc_pipe *pipe = &ctx->pipe[i];
- if (!pipe->bo)
+ for (i = 0; i < ARRAY_SIZE(ctx->vsc_pipe_bo); i++) {
+ if (!ctx->vsc_pipe_bo[i])
break;
- fd_bo_del(pipe->bo);
+ fd_bo_del(ctx->vsc_pipe_bo[i]);
}
fd_device_del(ctx->dev);
+ fd_pipe_del(ctx->pipe);
+
+ mtx_destroy(&ctx->gmem_lock);
if (fd_mesa_debug & (FD_DBG_BSTAT | FD_DBG_MSGS)) {
- printf("batch_total=%u, batch_sysmem=%u, batch_gmem=%u, batch_restore=%u\n",
+ printf("batch_total=%u, batch_sysmem=%u, batch_gmem=%u, batch_nondraw=%u, batch_restore=%u\n",
(uint32_t)ctx->stats.batch_total, (uint32_t)ctx->stats.batch_sysmem,
- (uint32_t)ctx->stats.batch_gmem, (uint32_t)ctx->stats.batch_restore);
+ (uint32_t)ctx->stats.batch_gmem, (uint32_t)ctx->stats.batch_nondraw,
+ (uint32_t)ctx->stats.batch_restore);
}
-
- FREE(ctx);
}
static void
memset(&ctx->debug, 0, sizeof(ctx->debug));
}
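+/* read the per-context or global GPU fault counter from the kernel: */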
+static uint32_t
+fd_get_reset_count(struct fd_context *ctx, bool per_context)
+{
+ uint64_t val;
+ enum fd_param_id param =
+ per_context ? FD_CTX_FAULTS : FD_GLOBAL_FAULTS;
+ int ret = fd_pipe_get_param(ctx->pipe, param, &val);
+ debug_assert(!ret);
+ return val;
+}
+
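+/* robustness hook: report whether the GPU has been reset since the last
+ * query, and whether this context was the one at fault:
+ */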
+static enum pipe_reset_status
+fd_get_device_reset_status(struct pipe_context *pctx)
+{
+ struct fd_context *ctx = fd_context(pctx);
+ int context_faults = fd_get_reset_count(ctx, true);
+ int global_faults = fd_get_reset_count(ctx, false);
+ enum pipe_reset_status status;
+
+ if (context_faults != ctx->context_reset_count) {
+ status = PIPE_GUILTY_CONTEXT_RESET;
+ } else if (global_faults != ctx->global_reset_count) {
+ status = PIPE_INNOCENT_CONTEXT_RESET;
+ } else {
+ status = PIPE_NO_RESET;
+ }
+
+ ctx->context_reset_count = context_faults;
+ ctx->global_reset_count = global_faults;
+
+ return status;
+}
+
/* TODO we could combine a few of these small buffers (solid_vbuf,
* blit_texcoord_vbuf, and vsc_size_mem) into a single buffer and
* save a tiny bit of memory
}});
ctx->solid_vbuf_state.vertexbuf.count = 1;
ctx->solid_vbuf_state.vertexbuf.vb[0].stride = 12;
- ctx->solid_vbuf_state.vertexbuf.vb[0].buffer = ctx->solid_vbuf;
+ ctx->solid_vbuf_state.vertexbuf.vb[0].buffer.resource = ctx->solid_vbuf;
/* setup blit_vbuf_state: */
ctx->blit_vbuf_state.vtx = pctx->create_vertex_elements_state(
}});
ctx->blit_vbuf_state.vertexbuf.count = 2;
ctx->blit_vbuf_state.vertexbuf.vb[0].stride = 8;
- ctx->blit_vbuf_state.vertexbuf.vb[0].buffer = ctx->blit_texcoord_vbuf;
+ ctx->blit_vbuf_state.vertexbuf.vb[0].buffer.resource = ctx->blit_texcoord_vbuf;
ctx->blit_vbuf_state.vertexbuf.vb[1].stride = 12;
- ctx->blit_vbuf_state.vertexbuf.vb[1].buffer = ctx->solid_vbuf;
+ ctx->blit_vbuf_state.vertexbuf.vb[1].buffer.resource = ctx->solid_vbuf;
}
void
struct pipe_context *
fd_context_init(struct fd_context *ctx, struct pipe_screen *pscreen,
- const uint8_t *primtypes, void *priv)
+ const uint8_t *primtypes, void *priv, unsigned flags)
{
struct fd_screen *screen = fd_screen(pscreen);
struct pipe_context *pctx;
+ unsigned prio = 1;
int i;
+ /* lower numerical value == higher priority: */
+ if (fd_mesa_debug & FD_DBG_HIPRIO)
+ prio = 0;
+ else if (flags & PIPE_CONTEXT_HIGH_PRIORITY)
+ prio = 0;
+ else if (flags & PIPE_CONTEXT_LOW_PRIORITY)
+ prio = 2;
+
ctx->screen = screen;
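+ /* each context gets its own fd_pipe, so it can have its own
+ * submit-queue priority:
+ */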
+ ctx->pipe = fd_pipe_new2(screen->dev, FD_PIPE_3D, prio);
+
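+ /* snapshot the fault counters, so get_device_reset_status() can
+ * detect resets that happen after context creation:
+ */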
+ if (fd_device_version(screen->dev) >= FD_VERSION_ROBUSTNESS) {
+ ctx->context_reset_count = fd_get_reset_count(ctx, true);
+ ctx->global_reset_count = fd_get_reset_count(ctx, false);
+ }
ctx->primtypes = primtypes;
ctx->primtype_mask = 0;
if (primtypes[i])
ctx->primtype_mask |= (1 << i);
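+
+ /* the gmem_lock guards GMEM rendering state against batches being
+ * flushed concurrently from the flush_queue thread:
+ */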
+ (void) mtx_init(&ctx->gmem_lock, mtx_plain);
+
/* need some sane defaults in case the state tracker doesn't
* set some state:
*/
pctx->flush = fd_context_flush;
pctx->emit_string_marker = fd_emit_string_marker;
pctx->set_debug_callback = fd_set_debug_callback;
+ pctx->get_device_reset_status = fd_get_device_reset_status;
pctx->create_fence_fd = fd_create_fence_fd;
pctx->fence_server_sync = fd_fence_server_sync;
+ pctx->texture_barrier = fd_texture_barrier;
+ pctx->memory_barrier = fd_memory_barrier;
pctx->stream_uploader = u_upload_create_default(pctx);
if (!pctx->stream_uploader)
goto fail;
pctx->const_uploader = pctx->stream_uploader;
- /* TODO what about compute? Ideally it creates it's own independent
- * batches per compute job (since it isn't using tiling, so no point
- * in getting involved with the re-ordering madness)..
- */
- if (!screen->reorder) {
- ctx->batch = fd_bc_alloc_batch(&screen->batch_cache, ctx);
- }
+ if (!ctx->screen->reorder)
+ ctx->batch = fd_bc_alloc_batch(&screen->batch_cache, ctx, false);
slab_create_child(&ctx->transfer_pool, &screen->transfer_pool);
fd_query_context_init(pctx);
fd_texture_init(pctx);
fd_state_init(pctx);
- fd_hw_query_init(pctx);
ctx->blitter = util_blitter_create(pctx);
if (!ctx->blitter)
if (!ctx->primconvert)
goto fail;
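+
+ /* track the currently active queries, so they can be paused/resumed
+ * at batch boundaries:
+ */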
+ list_inithead(&ctx->hw_active_queries);
+ list_inithead(&ctx->acc_active_queries);
+
return pctx;
fail: