X-Git-Url: https://git.libre-soc.org/?a=blobdiff_plain;f=src%2Fgallium%2Fdrivers%2Ffreedreno%2Ffreedreno_context.c;h=ca3b793000d2eb159741fb8a48e48077381714cd;hb=b8fbb39ab2c962e38f6c9d668de57582faf39b70;hp=66088da8c7e6bc31e25d10bfbbd3de54fb8842cf;hpb=37464efa3f80c141c2d73af5615e401763b2bbc8;p=mesa.git

diff --git a/src/gallium/drivers/freedreno/freedreno_context.c b/src/gallium/drivers/freedreno/freedreno_context.c
index 66088da8c7e..ca3b793000d 100644
--- a/src/gallium/drivers/freedreno/freedreno_context.c
+++ b/src/gallium/drivers/freedreno/freedreno_context.c
@@ -1,5 +1,3 @@
-/* -*- mode: C; c-file-style: "k&r"; tab-width 4; indent-tabs-mode: t; -*- */
-
 /*
  * Copyright (C) 2012 Rob Clark
  *
@@ -46,30 +44,65 @@ fd_context_flush(struct pipe_context *pctx, struct pipe_fence_handle **fencep,
 {
 	struct fd_context *ctx = fd_context(pctx);
 	struct pipe_fence_handle *fence = NULL;
+	// TODO we want to lookup batch if it exists, but not create one if not.
+	struct fd_batch *batch = fd_context_batch(ctx);
+
+	DBG("%p: flush: flags=%x\n", ctx->batch, flags);
+
+	/* In some sequence of events, we can end up with a last_fence that is
+	 * not an "fd" fence, which results in eglDupNativeFenceFDANDROID()
+	 * errors.
+	 *
+	 */
+	if (flags & PIPE_FLUSH_FENCE_FD)
+		fd_fence_ref(&ctx->last_fence, NULL);
+
+	/* if no rendering since last flush, ie. app just decided it needed
+	 * a fence, re-use the last one:
+	 */
+	if (ctx->last_fence) {
+		fd_fence_ref(&fence, ctx->last_fence);
+		goto out;
+	}
+
+	if (!batch)
+		return;
 
 	/* Take a ref to the batch's fence (batch can be unref'd when flushed: */
-	fd_fence_ref(pctx->screen, &fence, ctx->batch->fence);
+	fd_fence_ref(&fence, batch->fence);
 
 	if (flags & PIPE_FLUSH_FENCE_FD)
-		ctx->batch->needs_out_fence_fd = true;
+		batch->needs_out_fence_fd = true;
 
 	if (!ctx->screen->reorder) {
-		fd_batch_flush(ctx->batch, true, false);
+		fd_batch_flush(batch, true);
 	} else if (flags & PIPE_FLUSH_DEFERRED) {
 		fd_bc_flush_deferred(&ctx->screen->batch_cache, ctx);
 	} else {
 		fd_bc_flush(&ctx->screen->batch_cache, ctx);
 	}
 
+out:
 	if (fencep)
-		fd_fence_ref(pctx->screen, fencep, fence);
+		fd_fence_ref(fencep, fence);
 
-	fd_fence_ref(pctx->screen, &fence, NULL);
+	fd_fence_ref(&ctx->last_fence, fence);
+
+	fd_fence_ref(&fence, NULL);
 }
 
 static void
 fd_texture_barrier(struct pipe_context *pctx, unsigned flags)
 {
+	if (flags == PIPE_TEXTURE_BARRIER_FRAMEBUFFER) {
+		struct fd_context *ctx = fd_context(pctx);
+
+		if (ctx->framebuffer_barrier) {
+			ctx->framebuffer_barrier(ctx);
+			return;
+		}
+	}
+
 	/* On devices that could sample from GMEM we could possibly do better.
 	 * Or if we knew that we were doing GMEM bypass we could just emit a
 	 * cache flush, perhaps? But we don't know if future draws would cause
@@ -78,6 +111,16 @@ fd_texture_barrier(struct pipe_context *pctx, unsigned flags)
 	fd_context_flush(pctx, NULL, 0);
 }
 
+static void
+fd_memory_barrier(struct pipe_context *pctx, unsigned flags)
+{
+	if (!(flags & ~PIPE_BARRIER_UPDATE))
+		return;
+
+	fd_context_flush(pctx, NULL, 0);
+	/* TODO do we need to check for persistently mapped buffers and fd_bo_cpu_prep()?? */
+}
+
 /**
  * emit marker string as payload of a no-op packet, which can be
  * decoded by cffdump.
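The last_fence caching added above means back-to-back flushes with no rendering in between re-use one fence instead of flushing an empty batch, while PIPE_FLUSH_FENCE_FD drops the cached fence first so an fd-backed fence is always re-created. A minimal caller-side sketch of that behavior, using only the standard gallium pipe_context/pipe_screen entry points (the helper name is hypothetical, not part of this patch):

	/* Two flushes with no draws in between: the second takes the
	 * "if (ctx->last_fence)" path and hands back the cached fence.
	 */
	static void
	flush_twice(struct pipe_context *pctx)
	{
		struct pipe_screen *screen = pctx->screen;
		struct pipe_fence_handle *a = NULL, *b = NULL;

		pctx->flush(pctx, &a, 0);  /* flushes batch, fence cached in last_fence */
		pctx->flush(pctx, &b, 0);  /* no rendering since: re-uses last_fence */

		/* callers drop fence references through the screen: */
		screen->fence_reference(screen, &a, NULL);
		screen->fence_reference(screen, &b, NULL);
	}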
@@ -92,6 +135,8 @@ fd_emit_string_marker(struct pipe_context *pctx, const char *string, int len)
 	if (!ctx->batch)
 		return;
 
+	ctx->batch->needs_flush = true;
+
 	ring = ctx->batch->draw;
 
 	/* max packet size is 0x3fff dwords: */
@@ -123,9 +168,12 @@ fd_context_destroy(struct pipe_context *pctx)
 
 	DBG("");
 
+	fd_fence_ref(&ctx->last_fence, NULL);
+
 	if (ctx->screen->reorder && util_queue_is_initialized(&ctx->flush_queue))
 		util_queue_destroy(&ctx->flush_queue);
 
+	util_copy_framebuffer_state(&ctx->framebuffer, NULL);
 	fd_batch_reference(&ctx->batch, NULL); /* unref current batch */
 	fd_bc_invalidate_context(ctx);
 
@@ -161,8 +209,6 @@ fd_context_destroy(struct pipe_context *pctx)
 			(uint32_t)ctx->stats.batch_gmem, (uint32_t)ctx->stats.batch_nondraw,
 			(uint32_t)ctx->stats.batch_restore);
 	}
-
-	FREE(ctx);
 }
 
 static void
@@ -177,6 +223,39 @@ fd_set_debug_callback(struct pipe_context *pctx,
 		memset(&ctx->debug, 0, sizeof(ctx->debug));
 }
 
+static uint32_t
+fd_get_reset_count(struct fd_context *ctx, bool per_context)
+{
+	uint64_t val;
+	enum fd_param_id param =
+		per_context ? FD_CTX_FAULTS : FD_GLOBAL_FAULTS;
+	int ret = fd_pipe_get_param(ctx->pipe, param, &val);
+	debug_assert(!ret);
+	return val;
+}
+
+static enum pipe_reset_status
+fd_get_device_reset_status(struct pipe_context *pctx)
+{
+	struct fd_context *ctx = fd_context(pctx);
+	int context_faults = fd_get_reset_count(ctx, true);
+	int global_faults = fd_get_reset_count(ctx, false);
+	enum pipe_reset_status status;
+
+	if (context_faults != ctx->context_reset_count) {
+		status = PIPE_GUILTY_CONTEXT_RESET;
+	} else if (global_faults != ctx->global_reset_count) {
+		status = PIPE_INNOCENT_CONTEXT_RESET;
+	} else {
+		status = PIPE_NO_RESET;
+	}
+
+	ctx->context_reset_count = context_faults;
+	ctx->global_reset_count = global_faults;
+
+	return status;
+}
+
 /* TODO we could combine a few of these small buffers (solid_vbuf,
  * blit_texcoord_vbuf, and vsc_size_mem, into a single buffer and
  * save a tiny bit of memory
@@ -259,10 +338,24 @@ fd_context_init(struct fd_context *ctx, struct pipe_screen *pscreen,
 {
 	struct fd_screen *screen = fd_screen(pscreen);
 	struct pipe_context *pctx;
+	unsigned prio = 1;
 	int i;
 
+	/* lower numerical value == higher priority: */
+	if (fd_mesa_debug & FD_DBG_HIPRIO)
+		prio = 0;
+	else if (flags & PIPE_CONTEXT_HIGH_PRIORITY)
+		prio = 0;
+	else if (flags & PIPE_CONTEXT_LOW_PRIORITY)
+		prio = 2;
+
 	ctx->screen = screen;
-	ctx->pipe = fd_pipe_new(screen->dev, FD_PIPE_3D);
+	ctx->pipe = fd_pipe_new2(screen->dev, FD_PIPE_3D, prio);
+
+	if (fd_device_version(screen->dev) >= FD_VERSION_ROBUSTNESS) {
+		ctx->context_reset_count = fd_get_reset_count(ctx, true);
+		ctx->global_reset_count = fd_get_reset_count(ctx, false);
+	}
 
 	ctx->primtypes = primtypes;
 	ctx->primtype_mask = 0;
@@ -281,22 +374,22 @@ fd_context_init(struct fd_context *ctx, struct pipe_screen *pscreen,
 	pctx->flush = fd_context_flush;
 	pctx->emit_string_marker = fd_emit_string_marker;
 	pctx->set_debug_callback = fd_set_debug_callback;
+	pctx->get_device_reset_status = fd_get_device_reset_status;
 	pctx->create_fence_fd = fd_create_fence_fd;
 	pctx->fence_server_sync = fd_fence_server_sync;
 	pctx->texture_barrier = fd_texture_barrier;
+	pctx->memory_barrier = fd_memory_barrier;
 
 	pctx->stream_uploader = u_upload_create_default(pctx);
 	if (!pctx->stream_uploader)
 		goto fail;
 	pctx->const_uploader = pctx->stream_uploader;
 
-	ctx->batch = fd_bc_alloc_batch(&screen->batch_cache, ctx);
+	if (!ctx->screen->reorder)
+		ctx->batch = fd_bc_alloc_batch(&screen->batch_cache, ctx, false);
 
 	slab_create_child(&ctx->transfer_pool, &screen->transfer_pool);
 
-	if (!ctx->blit)
-		ctx->blit = fd_blitter_blit;
-
 	fd_draw_init(pctx);
 	fd_resource_context_init(pctx);
 	fd_query_context_init(pctx);
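The guilty/innocent classification in fd_get_device_reset_status() above compares two monotonic fault counters from the kernel: if this context's FD_CTX_FAULTS count advanced, the context itself faulted; if only FD_GLOBAL_FAULTS advanced, some other context did. A sketch of how a caller might consume the new hook (hypothetical helper; the enum values are the standard gallium pipe_reset_status ones):

	/* Roughly what glGetGraphicsResetStatus() maps onto for a
	 * robustness-enabled context:
	 */
	static const char *
	describe_reset(struct pipe_context *pctx)
	{
		switch (pctx->get_device_reset_status(pctx)) {
		case PIPE_GUILTY_CONTEXT_RESET:
			return "guilty";    /* our FD_CTX_FAULTS advanced */
		case PIPE_INNOCENT_CONTEXT_RESET:
			return "innocent";  /* only FD_GLOBAL_FAULTS advanced */
		case PIPE_UNKNOWN_CONTEXT_RESET:
			return "unknown";
		default:
			return "no reset";
		}
	}

Separately, with reorder enabled fd_context_init() no longer allocates ctx->batch up front (the "if (!ctx->screen->reorder)" hunk), so a current batch is looked up or created on demand. A rough sketch of that lazy pattern, assuming the fd_bc_alloc_batch()/fd_batch_reference() signatures used in this patch (the helper is hypothetical; the real lookup lives in fd_context_batch()):

	static struct fd_batch *
	current_batch(struct fd_context *ctx)
	{
		if (!ctx->batch) {
			struct fd_batch *batch =
				fd_bc_alloc_batch(&ctx->screen->batch_cache, ctx, false);
			/* store the context's reference, then drop the local one: */
			fd_batch_reference(&ctx->batch, batch);
			fd_batch_reference(&batch, NULL);
		}
		return ctx->batch;
	}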