diff --git a/src/gallium/drivers/freedreno/freedreno_draw.c b/src/gallium/drivers/freedreno/freedreno_draw.c
index 08cba777510..059de2ec8c6 100644
--- a/src/gallium/drivers/freedreno/freedreno_draw.c
+++ b/src/gallium/drivers/freedreno/freedreno_draw.c
@@ -1,5 +1,3 @@
-/* -*- mode: C; c-file-style: "k&r"; tab-width 4; indent-tabs-mode: t; -*- */
-
 /*
  * Copyright (C) 2012 Rob Clark
  *
@@ -27,14 +25,17 @@
  */
 
 #include "pipe/p_state.h"
+#include "util/u_draw.h"
 #include "util/u_string.h"
 #include "util/u_memory.h"
 #include "util/u_prim.h"
 #include "util/u_format.h"
 #include "util/u_helpers.h"
 
+#include "freedreno_blitter.h"
 #include "freedreno_draw.h"
 #include "freedreno_context.h"
+#include "freedreno_fence.h"
 #include "freedreno_state.h"
 #include "freedreno_resource.h"
 #include "freedreno_query_acc.h"
@@ -61,22 +62,24 @@ static void
 fd_draw_vbo(struct pipe_context *pctx, const struct pipe_draw_info *info)
 {
 	struct fd_context *ctx = fd_context(pctx);
-	struct fd_batch *batch = ctx->batch;
+	struct fd_batch *batch = fd_context_batch(ctx);
 	struct pipe_framebuffer_state *pfb = &batch->framebuffer;
-	struct pipe_scissor_state *scissor = fd_context_get_scissor(ctx);
-	unsigned i, prims, buffers = 0;
+	unsigned i, prims, buffers = 0, restore_buffers = 0;
+
+	/* for debugging problems with indirect draw, it is convenient
+	 * to be able to emulate it, to determine if game is feeding us
+	 * bogus data:
+	 */
+	if (info->indirect && (fd_mesa_debug & FD_DBG_NOINDR)) {
+		util_draw_indirect(pctx, info);
+		return;
+	}
 
 	if (!info->count_from_stream_output && !info->indirect &&
 	    !info->primitive_restart &&
 	    !u_trim_pipe_prim(info->mode, (unsigned*)&info->count))
 		return;
 
-	/* if we supported transform feedback, we'd have to disable this: */
-	if (((scissor->maxx - scissor->minx) *
-			(scissor->maxy - scissor->miny)) == 0) {
-		return;
-	}
-
 	/* TODO: push down the region versions into the tiles */
 	if (!fd_render_condition_check(pctx))
 		return;
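The FD_DBG_NOINDR path above replays an indirect draw on the CPU via util_draw_indirect(). A rough standalone sketch of that idea follows; the struct matches GL's DrawArraysIndirectCommand layout, but the callback and everything else is invented for illustration (the real helper in util/u_draw.c maps the buffer through the transfer API and re-issues the draw through the context):

#include <stdint.h>

/* one indirect draw record, mirroring GL's DrawArraysIndirectCommand */
struct draw_indirect_args {
	uint32_t count;          /* vertices per instance */
	uint32_t instance_count; /* number of instances */
	uint32_t start;          /* first vertex */
	uint32_t start_instance; /* first instance */
};

typedef void (*direct_draw_fn)(uint32_t start, uint32_t count,
		uint32_t start_instance, uint32_t instance_count);

/* replay each record as a plain direct draw, so bogus indirect
 * parameters coming from the app become visible on the CPU: */
static void
emulate_draw_indirect(const void *mapped_buf, unsigned offset,
		unsigned draw_count, unsigned stride, direct_draw_fn draw)
{
	const uint8_t *p = (const uint8_t *)mapped_buf + offset;

	for (unsigned i = 0; i < draw_count; i++, p += stride) {
		const struct draw_indirect_args *a =
				(const struct draw_indirect_args *)p;
		draw(a->start, a->count, a->start_instance, a->instance_count);
	}
}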
@@ -90,6 +93,8 @@ fd_draw_vbo(struct pipe_context *pctx, const struct pipe_draw_info *info)
 		return;
 	}
 
+	fd_fence_ref(&ctx->last_fence, NULL);
+
 	/* Upload a user index buffer. */
 	struct pipe_resource *indexbuf = NULL;
 	unsigned index_offset = 0;
@@ -126,16 +131,32 @@ fd_draw_vbo(struct pipe_context *pctx, const struct pipe_draw_info *info)
 
 	mtx_lock(&ctx->screen->lock);
 
-	if (fd_depth_enabled(ctx)) {
-		buffers |= FD_BUFFER_DEPTH;
-		resource_written(batch, pfb->zsbuf->texture);
-		batch->gmem_reason |= FD_GMEM_DEPTH_ENABLED;
-	}
+	if (ctx->dirty & (FD_DIRTY_FRAMEBUFFER | FD_DIRTY_ZSA)) {
+		if (fd_depth_enabled(ctx)) {
+			if (fd_resource(pfb->zsbuf->texture)->valid) {
+				restore_buffers |= FD_BUFFER_DEPTH;
+			} else {
+				batch->invalidated |= FD_BUFFER_DEPTH;
+			}
+			batch->gmem_reason |= FD_GMEM_DEPTH_ENABLED;
+			if (fd_depth_write_enabled(ctx)) {
+				buffers |= FD_BUFFER_DEPTH;
+				resource_written(batch, pfb->zsbuf->texture);
+			} else {
+				resource_read(batch, pfb->zsbuf->texture);
+			}
+		}
 
-	if (fd_stencil_enabled(ctx)) {
-		buffers |= FD_BUFFER_STENCIL;
-		resource_written(batch, pfb->zsbuf->texture);
-		batch->gmem_reason |= FD_GMEM_STENCIL_ENABLED;
+		if (fd_stencil_enabled(ctx)) {
+			if (fd_resource(pfb->zsbuf->texture)->valid) {
+				restore_buffers |= FD_BUFFER_STENCIL;
+			} else {
+				batch->invalidated |= FD_BUFFER_STENCIL;
+			}
+			batch->gmem_reason |= FD_GMEM_STENCIL_ENABLED;
+			buffers |= FD_BUFFER_STENCIL;
+			resource_written(batch, pfb->zsbuf->texture);
+		}
 	}
 
 	if (fd_logicop_enabled(ctx))
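Two separate changes land in the depth/stencil hunk above: the resource's valid flag decides between restore (mem2gmem) and invalidated, and depth test without depth writes now makes the zs buffer only a read dependency, so a batch that merely tests against it is not treated as the buffer's writer. A minimal sketch of the read-vs-write choice, where only the resource_read()/resource_written() names correspond to the patch and the rest is stand-in scaffolding:

#include <stdbool.h>

struct fd_batch;
struct pipe_resource;

/* assumed behavior of the helpers in the patch: record this batch as
 * a reader, or as the (single) writer, of the resource */
void resource_read(struct fd_batch *batch, struct pipe_resource *prsc);
void resource_written(struct fd_batch *batch, struct pipe_resource *prsc);

/* depth test without depth writes only reads the zs buffer, so it
 * need not make this batch the buffer's writer (fewer flushes): */
static void
track_depth(struct fd_batch *batch, struct pipe_resource *zsbuf,
		bool depth_test, bool depth_write)
{
	if (!depth_test)
		return;
	if (depth_write)
		resource_written(batch, zsbuf);
	else
		resource_read(batch, zsbuf);
}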
@@ -149,46 +170,82 @@ fd_draw_vbo(struct pipe_context *pctx, const struct pipe_draw_info *info)
 
 		surf = pfb->cbufs[i]->texture;
 
-		resource_written(batch, surf);
-		buffers |= PIPE_CLEAR_COLOR0 << i;
+		if (fd_resource(surf)->valid) {
+			restore_buffers |= PIPE_CLEAR_COLOR0 << i;
+		} else {
+			batch->invalidated |= PIPE_CLEAR_COLOR0 << i;
+		}
 
-		if (surf->nr_samples > 1)
-			batch->gmem_reason |= FD_GMEM_MSAA_ENABLED;
+		buffers |= PIPE_CLEAR_COLOR0 << i;
 
 		if (fd_blend_enabled(ctx, i))
 			batch->gmem_reason |= FD_GMEM_BLEND_ENABLED;
+
+		if (ctx->dirty & FD_DIRTY_FRAMEBUFFER)
+			resource_written(batch, pfb->cbufs[i]->texture);
 	}
 
 	/* Mark SSBOs as being written.. we don't actually know which ones are
 	 * read vs written, so just assume the worst
 	 */
-	foreach_bit(i, ctx->shaderbuf[PIPE_SHADER_FRAGMENT].enabled_mask)
-		resource_read(batch, ctx->shaderbuf[PIPE_SHADER_FRAGMENT].sb[i].buffer);
+	if (ctx->dirty_shader[PIPE_SHADER_FRAGMENT] & FD_DIRTY_SHADER_SSBO) {
+		foreach_bit(i, ctx->shaderbuf[PIPE_SHADER_FRAGMENT].enabled_mask)
+			resource_written(batch, ctx->shaderbuf[PIPE_SHADER_FRAGMENT].sb[i].buffer);
+	}
+
+	if (ctx->dirty_shader[PIPE_SHADER_FRAGMENT] & FD_DIRTY_SHADER_IMAGE) {
+		foreach_bit(i, ctx->shaderimg[PIPE_SHADER_FRAGMENT].enabled_mask) {
+			struct pipe_image_view *img =
+					&ctx->shaderimg[PIPE_SHADER_FRAGMENT].si[i];
+			if (img->access & PIPE_IMAGE_ACCESS_WRITE)
+				resource_written(batch, img->resource);
+			else
+				resource_read(batch, img->resource);
+		}
+	}
+
+	if (ctx->dirty_shader[PIPE_SHADER_VERTEX] & FD_DIRTY_SHADER_CONST) {
+		foreach_bit(i, ctx->constbuf[PIPE_SHADER_VERTEX].enabled_mask)
+			resource_read(batch, ctx->constbuf[PIPE_SHADER_VERTEX].cb[i].buffer);
+	}
 
-	foreach_bit(i, ctx->constbuf[PIPE_SHADER_VERTEX].enabled_mask)
-		resource_read(batch, ctx->constbuf[PIPE_SHADER_VERTEX].cb[i].buffer);
-	foreach_bit(i, ctx->constbuf[PIPE_SHADER_FRAGMENT].enabled_mask)
-		resource_read(batch, ctx->constbuf[PIPE_SHADER_FRAGMENT].cb[i].buffer);
+	if (ctx->dirty_shader[PIPE_SHADER_FRAGMENT] & FD_DIRTY_SHADER_CONST) {
+		foreach_bit(i, ctx->constbuf[PIPE_SHADER_FRAGMENT].enabled_mask)
+			resource_read(batch, ctx->constbuf[PIPE_SHADER_FRAGMENT].cb[i].buffer);
+	}
 
 	/* Mark VBOs as being read */
-	foreach_bit(i, ctx->vtx.vertexbuf.enabled_mask) {
-		assert(!ctx->vtx.vertexbuf.vb[i].is_user_buffer);
-		resource_read(batch, ctx->vtx.vertexbuf.vb[i].buffer.resource);
+	if (ctx->dirty & FD_DIRTY_VTXBUF) {
+		foreach_bit(i, ctx->vtx.vertexbuf.enabled_mask) {
+			assert(!ctx->vtx.vertexbuf.vb[i].is_user_buffer);
+			resource_read(batch, ctx->vtx.vertexbuf.vb[i].buffer.resource);
+		}
 	}
 
 	/* Mark index buffer as being read */
 	resource_read(batch, indexbuf);
 
+	/* Mark indirect draw buffer as being read */
+	if (info->indirect)
+		resource_read(batch, info->indirect->buffer);
+
 	/* Mark textures as being read */
-	foreach_bit(i, ctx->tex[PIPE_SHADER_VERTEX].valid_textures)
-		resource_read(batch, ctx->tex[PIPE_SHADER_VERTEX].textures[i]->texture);
-	foreach_bit(i, ctx->tex[PIPE_SHADER_FRAGMENT].valid_textures)
-		resource_read(batch, ctx->tex[PIPE_SHADER_FRAGMENT].textures[i]->texture);
+	if (ctx->dirty_shader[PIPE_SHADER_VERTEX] & FD_DIRTY_SHADER_TEX) {
+		foreach_bit(i, ctx->tex[PIPE_SHADER_VERTEX].valid_textures)
+			resource_read(batch, ctx->tex[PIPE_SHADER_VERTEX].textures[i]->texture);
+	}
+
+	if (ctx->dirty_shader[PIPE_SHADER_FRAGMENT] & FD_DIRTY_SHADER_TEX) {
+		foreach_bit(i, ctx->tex[PIPE_SHADER_FRAGMENT].valid_textures)
+			resource_read(batch, ctx->tex[PIPE_SHADER_FRAGMENT].textures[i]->texture);
+	}
 
 	/* Mark streamout buffers as being written.. */
-	for (i = 0; i < ctx->streamout.num_targets; i++)
-		if (ctx->streamout.targets[i])
-			resource_written(batch, ctx->streamout.targets[i]->buffer);
+	if (ctx->dirty & FD_DIRTY_STREAMOUT) {
+		for (i = 0; i < ctx->streamout.num_targets; i++)
+			if (ctx->streamout.targets[i])
+				resource_written(batch, ctx->streamout.targets[i]->buffer);
+	}
 
 	resource_written(batch, batch->query_buf);
 
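In the hunk above, SSBOs are conservatively marked written ("assume the worst"), while image views carry an access mask that says whether a write is possible. The loops iterate set bits of the enabled mask with mesa's foreach_bit(); a self-contained equivalent of the image case (the flag value and helper names are invented for this sketch, only the access-mask test mirrors the patch):

#include <stdint.h>

#define ACCESS_WRITE (1u << 0)	/* stand-in for PIPE_IMAGE_ACCESS_WRITE */

struct resource;
struct image_view { struct resource *res; unsigned access; };

void mark_read(struct resource *r);
void mark_written(struct resource *r);

/* walk each set bit of the mask, clearing the lowest set bit per
 * iteration -- equivalent in effect to mesa's foreach_bit(): */
static void
track_images(struct image_view *imgs, uint32_t enabled_mask)
{
	for (uint32_t m = enabled_mask; m; m &= m - 1) {
		struct image_view *img = &imgs[__builtin_ctz(m)];
		if (img->access & ACCESS_WRITE)
			mark_written(img->res);	/* may also be read: assume worst */
		else
			mark_read(img->res);
	}
}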
@@ -214,7 +271,7 @@ fd_draw_vbo(struct pipe_context *pctx, const struct pipe_draw_info *info)
 	ctx->stats.prims_generated += prims;
 
 	/* any buffers that haven't been cleared yet, we need to restore: */
-	batch->restore |= buffers & (FD_BUFFER_ALL & ~batch->cleared);
+	batch->restore |= restore_buffers & (FD_BUFFER_ALL & ~batch->invalidated);
 	/* and any buffers used, need to be resolved: */
 	batch->resolve |= buffers;
 
@@ -226,6 +283,8 @@ fd_draw_vbo(struct pipe_context *pctx, const struct pipe_draw_info *info)
 	if (ctx->draw_vbo(ctx, info, index_offset))
 		batch->needs_flush = true;
 
+	batch->num_vertices += info->count * info->instance_count;
+
 	for (i = 0; i < ctx->streamout.num_targets; i++)
 		ctx->streamout.offsets[i] += info->count;
 
@@ -238,89 +297,13 @@ fd_draw_vbo(struct pipe_context *pctx, const struct pipe_draw_info *info)
 	pipe_resource_reference(&indexbuf, NULL);
 }
 
-/* Generic clear implementation (partially) using u_blitter: */
-static void
-fd_blitter_clear(struct pipe_context *pctx, unsigned buffers,
-		const union pipe_color_union *color, double depth, unsigned stencil)
-{
-	struct fd_context *ctx = fd_context(pctx);
-	struct pipe_framebuffer_state *pfb = &ctx->batch->framebuffer;
-	struct blitter_context *blitter = ctx->blitter;
-
-	fd_blitter_pipe_begin(ctx, false, true, FD_STAGE_CLEAR);
-
-	util_blitter_common_clear_setup(blitter, pfb->width, pfb->height,
-			buffers, NULL, NULL);
-
-	struct pipe_stencil_ref sr = {
-		.ref_value = { stencil & 0xff }
-	};
-	pctx->set_stencil_ref(pctx, &sr);
-
-	struct pipe_constant_buffer cb = {
-		.buffer_size = 16,
-		.user_buffer = &color->ui,
-	};
-	pctx->set_constant_buffer(pctx, PIPE_SHADER_FRAGMENT, 0, &cb);
-
-	if (!ctx->clear_rs_state) {
-		const struct pipe_rasterizer_state tmpl = {
-			.cull_face = PIPE_FACE_NONE,
-			.half_pixel_center = 1,
-			.bottom_edge_rule = 1,
-			.flatshade = 1,
-			.depth_clip = 1,
-		};
-		ctx->clear_rs_state = pctx->create_rasterizer_state(pctx, &tmpl);
-	}
-	pctx->bind_rasterizer_state(pctx, ctx->clear_rs_state);
-
-	struct pipe_viewport_state vp = {
-		.scale = { 0.5f * pfb->width, -0.5f * pfb->height, depth },
-		.translate = { 0.5f * pfb->width, 0.5f * pfb->height, 0.0f },
-	};
-	pctx->set_viewport_states(pctx, 0, 1, &vp);
-
-	pctx->bind_vertex_elements_state(pctx, ctx->solid_vbuf_state.vtx);
-	pctx->set_vertex_buffers(pctx, blitter->vb_slot, 1,
-			&ctx->solid_vbuf_state.vertexbuf.vb[0]);
-	pctx->set_stream_output_targets(pctx, 0, NULL, NULL);
-	pctx->bind_vs_state(pctx, ctx->solid_prog.vp);
-	pctx->bind_fs_state(pctx, ctx->solid_prog.fp);
-
-	struct pipe_draw_info info = {
-		.mode = PIPE_PRIM_MAX, /* maps to DI_PT_RECTLIST */
-		.count = 2,
-		.max_index = 1,
-		.instance_count = 1,
-	};
-	ctx->draw_vbo(ctx, &info, 0);
-
-	util_blitter_restore_constant_buffer_state(blitter);
-	util_blitter_restore_vertex_states(blitter);
-	util_blitter_restore_fragment_states(blitter);
-	util_blitter_restore_textures(blitter);
-	util_blitter_restore_fb_state(blitter);
-	util_blitter_restore_render_cond(blitter);
-	util_blitter_unset_running_flag(blitter);
-
-	fd_blitter_pipe_end(ctx);
-}
-
-/* TODO figure out how to make better use of existing state mechanism
- * for clear (and possibly gmem->mem / mem->gmem) so we can (a) keep
- * track of what state really actually changes, and (b) reduce the code
- * in the a2xx/a3xx parts.
- */
-
 static void
 fd_clear(struct pipe_context *pctx, unsigned buffers,
 		const union pipe_color_union *color, double depth, unsigned stencil)
 {
 	struct fd_context *ctx = fd_context(pctx);
-	struct fd_batch *batch = ctx->batch;
+	struct fd_batch *batch = fd_context_batch(ctx);
 	struct pipe_framebuffer_state *pfb = &batch->framebuffer;
-	struct pipe_scissor_state *scissor = fd_context_get_scissor(ctx);
 	unsigned cleared_buffers;
 	int i;
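The new restore computation above, batch->restore |= restore_buffers & (FD_BUFFER_ALL & ~batch->invalidated), says: restore only buffers that held valid data at draw time and were not invalidated (e.g. fully cleared) within this batch. A toy check of that bitmask arithmetic, with made-up buffer bits and values:

#include <assert.h>
#include <stdint.h>

#define BUF_COLOR0  (1u << 0)
#define BUF_DEPTH   (1u << 1)
#define BUF_STENCIL (1u << 2)
#define BUF_ALL     (BUF_COLOR0 | BUF_DEPTH | BUF_STENCIL)

int
main(void)
{
	uint32_t invalidated = BUF_DEPTH;	/* depth fully cleared this batch */
	uint32_t restore_buffers = BUF_COLOR0 | BUF_DEPTH;	/* valid at draw */
	uint32_t restore = 0;

	/* an invalidated buffer never needs mem2gmem, even if a draw
	 * touched it while it still held valid contents: */
	restore |= restore_buffers & (BUF_ALL & ~invalidated);

	assert(restore == BUF_COLOR0);
	return 0;
}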
@@ -328,11 +311,21 @@ fd_clear(struct pipe_context *pctx, unsigned buffers,
 	if (!fd_render_condition_check(pctx))
 		return;
 
+	fd_fence_ref(&ctx->last_fence, NULL);
+
 	if (ctx->in_blit) {
 		fd_batch_reset(batch);
 		fd_context_all_dirty(ctx);
 	}
 
+	/* pctx->clear() is only for full-surface clears, so scissor is
+	 * equivalent to having GL_SCISSOR_TEST disabled:
+	 */
+	batch->max_scissor.minx = 0;
+	batch->max_scissor.miny = 0;
+	batch->max_scissor.maxx = pfb->width;
+	batch->max_scissor.maxy = pfb->height;
+
 	/* for bookkeeping about which buffers have been cleared (and thus
 	 * can fully or partially skip mem2gmem) we need to ignore buffers
 	 * that have already had a draw, in case apps do silly things like
@@ -341,19 +334,9 @@ fd_clear(struct pipe_context *pctx, unsigned buffers,
 	 * the depth buffer, etc)
 	 */
 	cleared_buffers = buffers & (FD_BUFFER_ALL & ~batch->restore);
+	batch->cleared |= buffers;
+	batch->invalidated |= cleared_buffers;
 
-	/* do we have full-screen scissor? */
-	if (!memcmp(scissor, &ctx->disabled_scissor, sizeof(*scissor))) {
-		batch->cleared |= cleared_buffers;
-	} else {
-		batch->partial_cleared |= cleared_buffers;
-		if (cleared_buffers & PIPE_CLEAR_COLOR)
-			batch->cleared_scissor.color = *scissor;
-		if (cleared_buffers & PIPE_CLEAR_DEPTH)
-			batch->cleared_scissor.depth = *scissor;
-		if (cleared_buffers & PIPE_CLEAR_STENCIL)
-			batch->cleared_scissor.stencil = *scissor;
-	}
 	batch->resolve |= buffers;
 	batch->needs_flush = true;
 
@@ -384,17 +367,24 @@ fd_clear(struct pipe_context *pctx, unsigned buffers,
 	/* if per-gen backend doesn't implement ctx->clear() generic
 	 * blitter clear:
 	 */
-	if (!ctx->clear) {
-		fd_blitter_clear(pctx, buffers, color, depth, stencil);
-		return;
-	}
+	bool fallback = true;
 
-	fd_batch_set_stage(batch, FD_STAGE_CLEAR);
+	if (ctx->clear) {
+		fd_batch_set_stage(batch, FD_STAGE_CLEAR);
 
-	ctx->clear(ctx, buffers, color, depth, stencil);
+		if (ctx->clear(ctx, buffers, color, depth, stencil)) {
+			if (fd_mesa_debug & FD_DBG_DCLEAR)
+				fd_context_all_dirty(ctx);
 
-	if (fd_mesa_debug & FD_DBG_DCLEAR)
-		fd_context_all_dirty(ctx);
+			fallback = false;
+		}
+	}
+
+	if (fallback) {
+		fd_blitter_clear(pctx, buffers, color, depth, stencil);
+	}
+
+	fd_batch_check_size(batch);
 }
 
 static void
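ctx->clear() is now a boolean hook, so a per-generation backend can decline a clear it cannot do natively, and the generic fd_blitter_clear() (the shared version from freedreno_blitter.h, replacing the local copy deleted above) runs as the fallback. The control flow boils down to this pattern, sketched with toy function types rather than the real hook signatures:

#include <stdbool.h>

/* stand-ins: the per-gen hook returns true when it handled the clear
 * natively, false to request the generic blitter path */
typedef bool (*hw_clear_fn)(unsigned buffers);

void blitter_clear(unsigned buffers);	/* assumed fallback */

static void
do_clear(hw_clear_fn hw_clear, unsigned buffers)
{
	bool fallback = true;

	if (hw_clear && hw_clear(buffers))
		fallback = false;

	if (fallback)
		blitter_clear(buffers);
}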
- */ - batch = fd_bc_alloc_batch(&ctx->screen->batch_cache, ctx); + batch = fd_bc_alloc_batch(&ctx->screen->batch_cache, ctx, true); fd_batch_reference(&save_batch, ctx->batch); fd_batch_reference(&ctx->batch, batch); + fd_context_all_dirty(ctx); mtx_lock(&ctx->screen->lock); @@ -439,7 +424,16 @@ fd_launch_grid(struct pipe_context *pctx, const struct pipe_grid_info *info) * read vs written, so just assume the worst */ foreach_bit(i, ctx->shaderbuf[PIPE_SHADER_COMPUTE].enabled_mask) - resource_read(batch, ctx->shaderbuf[PIPE_SHADER_COMPUTE].sb[i].buffer); + resource_written(batch, ctx->shaderbuf[PIPE_SHADER_COMPUTE].sb[i].buffer); + + foreach_bit(i, ctx->shaderimg[PIPE_SHADER_COMPUTE].enabled_mask) { + struct pipe_image_view *img = + &ctx->shaderimg[PIPE_SHADER_COMPUTE].si[i]; + if (img->access & PIPE_IMAGE_ACCESS_WRITE) + resource_written(batch, img->resource); + else + resource_read(batch, img->resource); + } /* UBO's are read */ foreach_bit(i, ctx->constbuf[PIPE_SHADER_COMPUTE].enabled_mask) @@ -449,14 +443,26 @@ fd_launch_grid(struct pipe_context *pctx, const struct pipe_grid_info *info) foreach_bit(i, ctx->tex[PIPE_SHADER_COMPUTE].valid_textures) resource_read(batch, ctx->tex[PIPE_SHADER_COMPUTE].textures[i]->texture); + /* For global buffers, we don't really know if read or written, so assume + * the worst: + */ + foreach_bit(i, ctx->global_bindings.enabled_mask) + resource_written(batch, ctx->global_bindings.buf[i]); + + if (info->indirect) + resource_read(batch, info->indirect); + mtx_unlock(&ctx->screen->lock); + batch->needs_flush = true; ctx->launch_grid(ctx, info); - fd_gmem_flush_compute(batch); + fd_batch_flush(batch, false); fd_batch_reference(&ctx->batch, save_batch); + fd_context_all_dirty(ctx); fd_batch_reference(&save_batch, NULL); + fd_batch_reference(&batch, NULL); } void