X-Git-Url: https://git.libre-soc.org/?a=blobdiff_plain;f=src%2Fgallium%2Fdrivers%2Ffreedreno%2Ffreedreno_draw.c;h=059de2ec8c612dcf9f1165bd4adc834f3b5a73a7;hb=c179ded9cb1bc3e42b887c1d3362c86befc3bbcc;hp=f88654063faa34f3540f608efb48fa463af98324;hpb=be8a8ebe578267ab24e343c3c1347936a221468e;p=mesa.git

diff --git a/src/gallium/drivers/freedreno/freedreno_draw.c b/src/gallium/drivers/freedreno/freedreno_draw.c
index f88654063fa..059de2ec8c6 100644
--- a/src/gallium/drivers/freedreno/freedreno_draw.c
+++ b/src/gallium/drivers/freedreno/freedreno_draw.c
@@ -1,5 +1,3 @@
-/* -*- mode: C; c-file-style: "k&r"; tab-width 4; indent-tabs-mode: t; -*- */
-
 /*
  * Copyright (C) 2012 Rob Clark
  *
@@ -27,81 +25,142 @@
  */
 
 #include "pipe/p_state.h"
+#include "util/u_draw.h"
 #include "util/u_string.h"
 #include "util/u_memory.h"
 #include "util/u_prim.h"
 #include "util/u_format.h"
+#include "util/u_helpers.h"
 
+#include "freedreno_blitter.h"
 #include "freedreno_draw.h"
 #include "freedreno_context.h"
+#include "freedreno_fence.h"
 #include "freedreno_state.h"
 #include "freedreno_resource.h"
+#include "freedreno_query_acc.h"
 #include "freedreno_query_hw.h"
 #include "freedreno_util.h"
 
 static void
-resource_used(struct fd_context *ctx, struct pipe_resource *prsc, boolean reading)
+resource_read(struct fd_batch *batch, struct pipe_resource *prsc)
 {
-	struct fd_resource *rsc;
-
 	if (!prsc)
 		return;
+	fd_batch_resource_used(batch, fd_resource(prsc), false);
+}
 
-	rsc = fd_resource(prsc);
-	if (reading)
-		rsc->reading = true;
-	else
-		rsc->writing = true;
-	list_delinit(&rsc->list);
-	list_addtail(&rsc->list, &ctx->used_resources);
+static void
+resource_written(struct fd_batch *batch, struct pipe_resource *prsc)
+{
+	if (!prsc)
+		return;
+	fd_batch_resource_used(batch, fd_resource(prsc), true);
 }
 
 static void
 fd_draw_vbo(struct pipe_context *pctx, const struct pipe_draw_info *info)
 {
 	struct fd_context *ctx = fd_context(pctx);
-	struct pipe_framebuffer_state *pfb = &ctx->framebuffer;
-	struct pipe_scissor_state *scissor = fd_context_get_scissor(ctx);
-	unsigned i, prims, buffers = 0;
+	struct fd_batch *batch = fd_context_batch(ctx);
+	struct pipe_framebuffer_state *pfb = &batch->framebuffer;
+	unsigned i, prims, buffers = 0, restore_buffers = 0;
 
-	/* if we supported transform feedback, we'd have to disable this: */
-	if (((scissor->maxx - scissor->minx) *
-			(scissor->maxy - scissor->miny)) == 0) {
+	/* for debugging problems with indirect draw, it is convenient
+	 * to be able to emulate it, to determine if game is feeding us
+	 * bogus data:
+	 */
+	if (info->indirect && (fd_mesa_debug & FD_DBG_NOINDR)) {
+		util_draw_indirect(pctx, info);
 		return;
 	}
 
+	if (!info->count_from_stream_output && !info->indirect &&
+	    !info->primitive_restart &&
+	    !u_trim_pipe_prim(info->mode, (unsigned*)&info->count))
+		return;
+
+	/* TODO: push down the region versions into the tiles */
+	if (!fd_render_condition_check(pctx))
+		return;
+
 	/* emulate unsupported primitives: */
 	if (!fd_supported_prim(ctx, info->mode)) {
-		util_primconvert_save_index_buffer(ctx->primconvert, &ctx->indexbuf);
+		if (ctx->streamout.num_targets > 0)
+			debug_error("stream-out with emulated prims");
 		util_primconvert_save_rasterizer_state(ctx->primconvert, ctx->rasterizer);
 		util_primconvert_draw_vbo(ctx->primconvert, info);
 		return;
 	}
 
-	ctx->needs_flush = true;
+	fd_fence_ref(&ctx->last_fence, NULL);
+
+	/* Upload a user index buffer. */
+	struct pipe_resource *indexbuf = NULL;
+	unsigned index_offset = 0;
+	struct pipe_draw_info new_info;
+	if (info->index_size) {
+		if (info->has_user_indices) {
+			if (!util_upload_index_buffer(pctx, info, &indexbuf, &index_offset))
+				return;
+			new_info = *info;
+			new_info.index.resource = indexbuf;
+			new_info.has_user_indices = false;
+			info = &new_info;
+		} else {
+			indexbuf = info->index.resource;
+		}
+	}
+
+	if (ctx->in_blit) {
+		fd_batch_reset(batch);
+		fd_context_all_dirty(ctx);
+	}
+
+	batch->blit = ctx->in_blit;
+	batch->back_blit = ctx->in_shadow;
+
+	/* NOTE: needs to be before resource_written(batch->query_buf), otherwise
+	 * query_buf may not be created yet.
+	 */
+	fd_batch_set_stage(batch, FD_STAGE_DRAW);
 
 	/*
 	 * Figure out the buffers/features we need:
 	 */
-	if (fd_depth_enabled(ctx)) {
-		buffers |= FD_BUFFER_DEPTH;
-		fd_resource(pfb->zsbuf->texture)->dirty = true;
-		ctx->gmem_reason |= FD_GMEM_DEPTH_ENABLED;
-	}
-
-	if (fd_stencil_enabled(ctx)) {
-		struct fd_resource *rsc = fd_resource(pfb->zsbuf->texture);
-		buffers |= FD_BUFFER_STENCIL;
-		if (rsc->stencil)
-			rsc->stencil->dirty = true;
-		else
-			rsc->dirty = true;
-		ctx->gmem_reason |= FD_GMEM_STENCIL_ENABLED;
+	mtx_lock(&ctx->screen->lock);
+
+	if (ctx->dirty & (FD_DIRTY_FRAMEBUFFER | FD_DIRTY_ZSA)) {
+		if (fd_depth_enabled(ctx)) {
+			if (fd_resource(pfb->zsbuf->texture)->valid) {
+				restore_buffers |= FD_BUFFER_DEPTH;
+			} else {
+				batch->invalidated |= FD_BUFFER_DEPTH;
+			}
+			batch->gmem_reason |= FD_GMEM_DEPTH_ENABLED;
+			if (fd_depth_write_enabled(ctx)) {
+				buffers |= FD_BUFFER_DEPTH;
+				resource_written(batch, pfb->zsbuf->texture);
+			} else {
+				resource_read(batch, pfb->zsbuf->texture);
+			}
+		}
+
+		if (fd_stencil_enabled(ctx)) {
+			if (fd_resource(pfb->zsbuf->texture)->valid) {
+				restore_buffers |= FD_BUFFER_STENCIL;
+			} else {
+				batch->invalidated |= FD_BUFFER_STENCIL;
+			}
+			batch->gmem_reason |= FD_GMEM_STENCIL_ENABLED;
+			buffers |= FD_BUFFER_STENCIL;
+			resource_written(batch, pfb->zsbuf->texture);
+		}
 	}
 
 	if (fd_logicop_enabled(ctx))
-		ctx->gmem_reason |= FD_GMEM_LOGICOP_ENABLED;
+		batch->gmem_reason |= FD_GMEM_LOGICOP_ENABLED;
 
 	for (i = 0; i < pfb->nr_cbufs; i++) {
 		struct pipe_resource *surf;
@@ -111,96 +170,162 @@ fd_draw_vbo(struct pipe_context *pctx, const struct pipe_draw_info *info)
 
 		surf = pfb->cbufs[i]->texture;
 
-		fd_resource(surf)->dirty = true;
-		buffers |= PIPE_CLEAR_COLOR0 << i;
+		if (fd_resource(surf)->valid) {
+			restore_buffers |= PIPE_CLEAR_COLOR0 << i;
+		} else {
+			batch->invalidated |= PIPE_CLEAR_COLOR0 << i;
+		}
 
-		if (surf->nr_samples > 1)
-			ctx->gmem_reason |= FD_GMEM_MSAA_ENABLED;
+		buffers |= PIPE_CLEAR_COLOR0 << i;
 
 		if (fd_blend_enabled(ctx, i))
-			ctx->gmem_reason |= FD_GMEM_BLEND_ENABLED;
+			batch->gmem_reason |= FD_GMEM_BLEND_ENABLED;
+
+		if (ctx->dirty & FD_DIRTY_FRAMEBUFFER)
+			resource_written(batch, pfb->cbufs[i]->texture);
 	}
 
-	/* Skip over buffer 0, that is sent along with the command stream */
-	for (i = 1; i < PIPE_MAX_CONSTANT_BUFFERS; i++) {
-		resource_used(ctx, ctx->constbuf[PIPE_SHADER_VERTEX].cb[i].buffer, true);
-		resource_used(ctx, ctx->constbuf[PIPE_SHADER_FRAGMENT].cb[i].buffer, true);
+	/* Mark SSBOs as being written.. we don't actually know which ones are
+	 * read vs written, so just assume the worst
+	 */
+	if (ctx->dirty_shader[PIPE_SHADER_FRAGMENT] & FD_DIRTY_SHADER_SSBO) {
+		foreach_bit(i, ctx->shaderbuf[PIPE_SHADER_FRAGMENT].enabled_mask)
+			resource_written(batch, ctx->shaderbuf[PIPE_SHADER_FRAGMENT].sb[i].buffer);
+	}
+
+	if (ctx->dirty_shader[PIPE_SHADER_FRAGMENT] & FD_DIRTY_SHADER_IMAGE) {
+		foreach_bit(i, ctx->shaderimg[PIPE_SHADER_FRAGMENT].enabled_mask) {
+			struct pipe_image_view *img =
+					&ctx->shaderimg[PIPE_SHADER_FRAGMENT].si[i];
+			if (img->access & PIPE_IMAGE_ACCESS_WRITE)
+				resource_written(batch, img->resource);
+			else
+				resource_read(batch, img->resource);
+		}
+	}
+
+	if (ctx->dirty_shader[PIPE_SHADER_VERTEX] & FD_DIRTY_SHADER_CONST) {
+		foreach_bit(i, ctx->constbuf[PIPE_SHADER_VERTEX].enabled_mask)
+			resource_read(batch, ctx->constbuf[PIPE_SHADER_VERTEX].cb[i].buffer);
+	}
+
+	if (ctx->dirty_shader[PIPE_SHADER_FRAGMENT] & FD_DIRTY_SHADER_CONST) {
+		foreach_bit(i, ctx->constbuf[PIPE_SHADER_FRAGMENT].enabled_mask)
+			resource_read(batch, ctx->constbuf[PIPE_SHADER_FRAGMENT].cb[i].buffer);
 	}
 
 	/* Mark VBOs as being read */
-	for (i = 0; i < ctx->vtx.vertexbuf.count; i++) {
-		assert(!ctx->vtx.vertexbuf.vb[i].user_buffer);
-		resource_used(ctx, ctx->vtx.vertexbuf.vb[i].buffer, true);
+	if (ctx->dirty & FD_DIRTY_VTXBUF) {
+		foreach_bit(i, ctx->vtx.vertexbuf.enabled_mask) {
+			assert(!ctx->vtx.vertexbuf.vb[i].is_user_buffer);
+			resource_read(batch, ctx->vtx.vertexbuf.vb[i].buffer.resource);
+		}
 	}
 
 	/* Mark index buffer as being read */
-	resource_used(ctx, ctx->indexbuf.buffer, true);
+	resource_read(batch, indexbuf);
+
+	/* Mark indirect draw buffer as being read */
+	if (info->indirect)
+		resource_read(batch, info->indirect->buffer);
 
 	/* Mark textures as being read */
-	for (i = 0; i < ctx->verttex.num_textures; i++)
-		if (ctx->verttex.textures[i])
-			resource_used(ctx, ctx->verttex.textures[i]->texture, true);
-	for (i = 0; i < ctx->fragtex.num_textures; i++)
-		if (ctx->fragtex.textures[i])
-			resource_used(ctx, ctx->fragtex.textures[i]->texture, true);
-
-	/* Mark streamout buffers as being read.. actually they are written.. */
-	for (i = 0; i < ctx->streamout.num_targets; i++)
-		if (ctx->streamout.targets[i])
-			resource_used(ctx, ctx->streamout.targets[i]->buffer, false);
+	if (ctx->dirty_shader[PIPE_SHADER_VERTEX] & FD_DIRTY_SHADER_TEX) {
+		foreach_bit(i, ctx->tex[PIPE_SHADER_VERTEX].valid_textures)
+			resource_read(batch, ctx->tex[PIPE_SHADER_VERTEX].textures[i]->texture);
+	}
 
-	ctx->num_draws++;
+	if (ctx->dirty_shader[PIPE_SHADER_FRAGMENT] & FD_DIRTY_SHADER_TEX) {
+		foreach_bit(i, ctx->tex[PIPE_SHADER_FRAGMENT].valid_textures)
+			resource_read(batch, ctx->tex[PIPE_SHADER_FRAGMENT].textures[i]->texture);
+	}
+
+	/* Mark streamout buffers as being written.. */
+	if (ctx->dirty & FD_DIRTY_STREAMOUT) {
+		for (i = 0; i < ctx->streamout.num_targets; i++)
+			if (ctx->streamout.targets[i])
+				resource_written(batch, ctx->streamout.targets[i]->buffer);
+	}
+
+	resource_written(batch, batch->query_buf);
+
+	list_for_each_entry(struct fd_acc_query, aq, &ctx->acc_active_queries, node)
+		resource_written(batch, aq->prsc);
+
+	mtx_unlock(&ctx->screen->lock);
+
+	batch->num_draws++;
 
 	prims = u_reduced_prims_for_vertices(info->mode, info->count);
 
 	ctx->stats.draw_calls++;
-	ctx->stats.prims_emitted += prims;
+
+	/* TODO prims_emitted should be clipped when the stream-out buffer is
+	 * not large enough.  See max_tf_vtx().. probably need to move that
+	 * into common code.  Although a bit more annoying since a2xx doesn't
+	 * use ir3 so no common way to get at the pipe_stream_output_info
+	 * which is needed for this calculation.
+	 */
+	if (ctx->streamout.num_targets > 0)
+		ctx->stats.prims_emitted += prims;
+	ctx->stats.prims_generated += prims;
 
 	/* any buffers that haven't been cleared yet, we need to restore: */
-	ctx->restore |= buffers & (FD_BUFFER_ALL & ~ctx->cleared);
+	batch->restore |= restore_buffers & (FD_BUFFER_ALL & ~batch->invalidated);
 	/* and any buffers used, need to be resolved: */
-	ctx->resolve |= buffers;
+	batch->resolve |= buffers;
 
-	DBG("%x num_draws=%u (%s/%s)", buffers, ctx->num_draws,
+	DBG("%p: %x %ux%u num_draws=%u (%s/%s)", batch, buffers,
+		pfb->width, pfb->height, batch->num_draws,
 		util_format_short_name(pipe_surface_format(pfb->cbufs[0])),
 		util_format_short_name(pipe_surface_format(pfb->zsbuf)));
 
-	fd_hw_query_set_stage(ctx, ctx->ring, FD_STAGE_DRAW);
-	ctx->draw_vbo(ctx, info);
+	if (ctx->draw_vbo(ctx, info, index_offset))
+		batch->needs_flush = true;
+
+	batch->num_vertices += info->count * info->instance_count;
 
 	for (i = 0; i < ctx->streamout.num_targets; i++)
-		ctx->streamout.offsets[i] += prims;
-
-	/* if an app (or, well, piglit test) does many thousands of draws
-	 * without flush (or anything which implicitly flushes, like
-	 * changing render targets), we can exceed the ringbuffer size.
-	 * Since we don't currently have a sane way to wrapparound, and
-	 * we use the same buffer for both draw and tiling commands, for
-	 * now we need to do this hack and trigger flush if we are running
-	 * low on remaining space for cmds:
-	 */
-	if (((ctx->ring->cur - ctx->ring->start) >
-			(ctx->ring->size/4 - FD_TILING_COMMANDS_DWORDS)) ||
-			(fd_mesa_debug & FD_DBG_FLUSH))
-		fd_context_render(pctx);
-}
+		ctx->streamout.offsets[i] += info->count;
 
-/* TODO figure out how to make better use of existing state mechanism
- * for clear (and possibly gmem->mem / mem->gmem) so we can (a) keep
- * track of what state really actually changes, and (b) reduce the code
- * in the a2xx/a3xx parts.
- */
+	if (fd_mesa_debug & FD_DBG_DDRAW)
+		fd_context_all_dirty(ctx);
+
+	fd_batch_check_size(batch);
+
+	if (info == &new_info)
+		pipe_resource_reference(&indexbuf, NULL);
+}
 
 static void
 fd_clear(struct pipe_context *pctx, unsigned buffers,
 		const union pipe_color_union *color, double depth, unsigned stencil)
 {
 	struct fd_context *ctx = fd_context(pctx);
-	struct pipe_framebuffer_state *pfb = &ctx->framebuffer;
-	struct pipe_scissor_state *scissor = fd_context_get_scissor(ctx);
+	struct fd_batch *batch = fd_context_batch(ctx);
+	struct pipe_framebuffer_state *pfb = &batch->framebuffer;
 	unsigned cleared_buffers;
 	int i;
 
+	/* TODO: push down the region versions into the tiles */
+	if (!fd_render_condition_check(pctx))
+		return;
+
+	fd_fence_ref(&ctx->last_fence, NULL);
+
+	if (ctx->in_blit) {
+		fd_batch_reset(batch);
+		fd_context_all_dirty(ctx);
+	}
+
+	/* pctx->clear() is only for full-surface clears, so scissor is
+	 * equivalent to having GL_SCISSOR_TEST disabled:
+	 */
+	batch->max_scissor.minx = 0;
+	batch->max_scissor.miny = 0;
+	batch->max_scissor.maxx = pfb->width;
+	batch->max_scissor.maxy = pfb->height;
+
 	/* for bookkeeping about which buffers have been cleared (and thus
 	 * can fully or partially skip mem2gmem) we need to ignore buffers
 	 * that have already had a draw, in case apps do silly things like
@@ -208,62 +333,65 @@ fd_clear(struct pipe_context *pctx, unsigned buffers,
 	 * something like alpha-test causes side effects from the draw in
 	 * the depth buffer, etc)
 	 */
-	cleared_buffers = buffers & (FD_BUFFER_ALL & ~ctx->restore);
-
-	/* do we have full-screen scissor? */
-	if (!memcmp(scissor, &ctx->disabled_scissor, sizeof(*scissor))) {
-		ctx->cleared |= cleared_buffers;
-	} else {
-		ctx->partial_cleared |= cleared_buffers;
-		if (cleared_buffers & PIPE_CLEAR_COLOR)
-			ctx->cleared_scissor.color = *scissor;
-		if (cleared_buffers & PIPE_CLEAR_DEPTH)
-			ctx->cleared_scissor.depth = *scissor;
-		if (cleared_buffers & PIPE_CLEAR_STENCIL)
-			ctx->cleared_scissor.stencil = *scissor;
-	}
-	ctx->resolve |= buffers;
-	ctx->needs_flush = true;
+	cleared_buffers = buffers & (FD_BUFFER_ALL & ~batch->restore);
+	batch->cleared |= buffers;
+	batch->invalidated |= cleared_buffers;
+
+	batch->resolve |= buffers;
+	batch->needs_flush = true;
+
+	mtx_lock(&ctx->screen->lock);
 
 	if (buffers & PIPE_CLEAR_COLOR)
 		for (i = 0; i < pfb->nr_cbufs; i++)
 			if (buffers & (PIPE_CLEAR_COLOR0 << i))
-				fd_resource(pfb->cbufs[i]->texture)->dirty = true;
+				resource_written(batch, pfb->cbufs[i]->texture);
 
 	if (buffers & (PIPE_CLEAR_DEPTH | PIPE_CLEAR_STENCIL)) {
-		struct fd_resource *rsc = fd_resource(pfb->zsbuf->texture);
-		if (rsc->stencil && buffers & PIPE_CLEAR_STENCIL)
-			rsc->stencil->dirty = true;
-		if (!rsc->stencil || buffers & PIPE_CLEAR_DEPTH)
-			rsc->dirty = true;
-
-		ctx->gmem_reason |= FD_GMEM_CLEARS_DEPTH_STENCIL;
+		resource_written(batch, pfb->zsbuf->texture);
+		batch->gmem_reason |= FD_GMEM_CLEARS_DEPTH_STENCIL;
 	}
 
-	DBG("%x depth=%f, stencil=%u (%s/%s)", buffers, depth, stencil,
+	resource_written(batch, batch->query_buf);
+
+	list_for_each_entry(struct fd_acc_query, aq, &ctx->acc_active_queries, node)
+		resource_written(batch, aq->prsc);
+
+	mtx_unlock(&ctx->screen->lock);
+
+	DBG("%p: %x %ux%u depth=%f, stencil=%u (%s/%s)", batch, buffers,
+		pfb->width, pfb->height, depth, stencil,
 		util_format_short_name(pipe_surface_format(pfb->cbufs[0])),
 		util_format_short_name(pipe_surface_format(pfb->zsbuf)));
 
-	fd_hw_query_set_stage(ctx, ctx->ring, FD_STAGE_CLEAR);
+	/* if per-gen backend doesn't implement ctx->clear() generic
+	 * blitter clear:
+	 */
+	bool fallback = true;
 
-	ctx->clear(ctx, buffers, color, depth, stencil);
+	if (ctx->clear) {
+		fd_batch_set_stage(batch, FD_STAGE_CLEAR);
 
-	ctx->dirty |= FD_DIRTY_ZSA |
-			FD_DIRTY_VIEWPORT |
-			FD_DIRTY_RASTERIZER |
-			FD_DIRTY_SAMPLE_MASK |
-			FD_DIRTY_PROG |
-			FD_DIRTY_CONSTBUF |
-			FD_DIRTY_BLEND;
+		if (ctx->clear(ctx, buffers, color, depth, stencil)) {
+			if (fd_mesa_debug & FD_DBG_DCLEAR)
+				fd_context_all_dirty(ctx);
 
-	if (fd_mesa_debug & FD_DBG_DCLEAR)
-		ctx->dirty = 0xffffffff;
+			fallback = false;
+		}
+	}
+
+	if (fallback) {
+		fd_blitter_clear(pctx, buffers, color, depth, stencil);
+	}
+
+	fd_batch_check_size(batch);
 }
 
 static void
 fd_clear_render_target(struct pipe_context *pctx, struct pipe_surface *ps,
 		const union pipe_color_union *color,
-		unsigned x, unsigned y, unsigned w, unsigned h)
+		unsigned x, unsigned y, unsigned w, unsigned h,
+		bool render_condition_enabled)
 {
 	DBG("TODO: x=%u, y=%u, w=%u, h=%u", x, y, w, h);
 }
@@ -271,19 +399,81 @@ fd_clear_render_target(struct pipe_context *pctx, struct pipe_surface *ps,
 static void
 fd_clear_depth_stencil(struct pipe_context *pctx, struct pipe_surface *ps,
 		unsigned buffers, double depth, unsigned stencil,
-		unsigned x, unsigned y, unsigned w, unsigned h)
+		unsigned x, unsigned y, unsigned w, unsigned h,
+		bool render_condition_enabled)
 {
 	DBG("TODO: buffers=%u, depth=%f, stencil=%u, x=%u, y=%u, w=%u, h=%u",
 			buffers, depth, stencil, x, y, w, h);
 }
 
+static void
+fd_launch_grid(struct pipe_context *pctx, const struct pipe_grid_info *info)
+{
+	struct fd_context *ctx = fd_context(pctx);
+	struct fd_batch *batch, *save_batch = NULL;
+	unsigned i;
+
+	batch = fd_bc_alloc_batch(&ctx->screen->batch_cache, ctx, true);
+	fd_batch_reference(&save_batch, ctx->batch);
+	fd_batch_reference(&ctx->batch, batch);
+	fd_context_all_dirty(ctx);
+
+	mtx_lock(&ctx->screen->lock);
+
+	/* Mark SSBOs as being written.. we don't actually know which ones are
+	 * read vs written, so just assume the worst
+	 */
+	foreach_bit(i, ctx->shaderbuf[PIPE_SHADER_COMPUTE].enabled_mask)
+		resource_written(batch, ctx->shaderbuf[PIPE_SHADER_COMPUTE].sb[i].buffer);
+
+	foreach_bit(i, ctx->shaderimg[PIPE_SHADER_COMPUTE].enabled_mask) {
+		struct pipe_image_view *img =
+				&ctx->shaderimg[PIPE_SHADER_COMPUTE].si[i];
+		if (img->access & PIPE_IMAGE_ACCESS_WRITE)
+			resource_written(batch, img->resource);
+		else
+			resource_read(batch, img->resource);
+	}
+
+	/* UBO's are read */
+	foreach_bit(i, ctx->constbuf[PIPE_SHADER_COMPUTE].enabled_mask)
+		resource_read(batch, ctx->constbuf[PIPE_SHADER_COMPUTE].cb[i].buffer);
+
+	/* Mark textures as being read */
+	foreach_bit(i, ctx->tex[PIPE_SHADER_COMPUTE].valid_textures)
+		resource_read(batch, ctx->tex[PIPE_SHADER_COMPUTE].textures[i]->texture);
+
+	/* For global buffers, we don't really know if read or written, so assume
+	 * the worst:
+	 */
+	foreach_bit(i, ctx->global_bindings.enabled_mask)
+		resource_written(batch, ctx->global_bindings.buf[i]);
+
+	if (info->indirect)
+		resource_read(batch, info->indirect);
+
+	mtx_unlock(&ctx->screen->lock);
+
+	batch->needs_flush = true;
+	ctx->launch_grid(ctx, info);
+
+	fd_batch_flush(batch, false);
+
+	fd_batch_reference(&ctx->batch, save_batch);
+	fd_context_all_dirty(ctx);
+	fd_batch_reference(&save_batch, NULL);
+	fd_batch_reference(&batch, NULL);
+}
+
 void
 fd_draw_init(struct pipe_context *pctx)
 {
-	list_inithead(&fd_context(pctx)->used_resources);
-
 	pctx->draw_vbo = fd_draw_vbo;
 	pctx->clear = fd_clear;
 	pctx->clear_render_target = fd_clear_render_target;
 	pctx->clear_depth_stencil = fd_clear_depth_stencil;
+
+	if (has_compute(fd_screen(pctx->screen))) {
+		pctx->launch_grid = fd_launch_grid;
+	}
 }
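
Illustrative sketch (separate from the patch above): how the restore/resolve bookkeeping in fd_draw_vbo() and fd_clear() composes. The EX_BUFFER_* bits below are placeholders standing in for the driver's FD_BUFFER_* masks, and the values shown are made up for the example, so treat this as a minimal model of the mask logic rather than freedreno code.

#include <stdio.h>

/* Placeholder buffer bits for the example (not the driver's real values). */
#define EX_BUFFER_COLOR   (1u << 0)
#define EX_BUFFER_DEPTH   (1u << 1)
#define EX_BUFFER_STENCIL (1u << 2)
#define EX_BUFFER_ALL     (EX_BUFFER_COLOR | EX_BUFFER_DEPTH | EX_BUFFER_STENCIL)

int main(void)
{
	/* depth was invalidated (e.g. fully cleared) earlier in this batch: */
	unsigned invalidated = EX_BUFFER_DEPTH;
	unsigned restore = 0, resolve = 0;

	/* a draw touches color + depth, and both had valid contents before: */
	unsigned restore_buffers = EX_BUFFER_COLOR | EX_BUFFER_DEPTH;
	unsigned buffers = EX_BUFFER_COLOR | EX_BUFFER_DEPTH;

	/* same composition as the patch: only buffers not already invalidated
	 * in this batch need a mem2gmem restore, while every buffer the draw
	 * touches must be resolved back out at flush time:
	 */
	restore |= restore_buffers & (EX_BUFFER_ALL & ~invalidated);
	resolve |= buffers;

	/* prints: restore=0x1 resolve=0x3 */
	printf("restore=0x%x resolve=0x%x\n", restore, resolve);
	return 0;
}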