diff --git a/src/gallium/drivers/freedreno/freedreno_gmem.c b/src/gallium/drivers/freedreno/freedreno_gmem.c
index 4040d1f7615..37a2f33365d 100644
--- a/src/gallium/drivers/freedreno/freedreno_gmem.c
+++ b/src/gallium/drivers/freedreno/freedreno_gmem.c
@@ -34,6 +34,7 @@
 
 #include "freedreno_gmem.h"
 #include "freedreno_context.h"
+#include "freedreno_fence.h"
 #include "freedreno_resource.h"
 #include "freedreno_query_hw.h"
 #include "freedreno_util.h"
@@ -67,87 +68,132 @@
  * resolve.
  */
 
-static uint32_t bin_width(struct fd_context *ctx)
+static uint32_t bin_width(struct fd_screen *screen)
 {
-	if (is_a4xx(ctx->screen))
+	if (is_a4xx(screen) || is_a5xx(screen))
 		return 1024;
-	if (is_a3xx(ctx->screen))
+	if (is_a3xx(screen))
 		return 992;
 	return 512;
 }
 
+static uint32_t
+total_size(uint8_t cbuf_cpp[], uint8_t zsbuf_cpp[2],
+		uint32_t bin_w, uint32_t bin_h, struct fd_gmem_stateobj *gmem)
+{
+	uint32_t total = 0, i;
+
+	for (i = 0; i < MAX_RENDER_TARGETS; i++) {
+		if (cbuf_cpp[i]) {
+			gmem->cbuf_base[i] = align(total, 0x4000);
+			total = gmem->cbuf_base[i] + cbuf_cpp[i] * bin_w * bin_h;
+		}
+	}
+
+	if (zsbuf_cpp[0]) {
+		gmem->zsbuf_base[0] = align(total, 0x4000);
+		total = gmem->zsbuf_base[0] + zsbuf_cpp[0] * bin_w * bin_h;
+	}
+
+	if (zsbuf_cpp[1]) {
+		gmem->zsbuf_base[1] = align(total, 0x4000);
+		total = gmem->zsbuf_base[1] + zsbuf_cpp[1] * bin_w * bin_h;
+	}
+
+	return total;
+}
+
 static void
-calculate_tiles(struct fd_context *ctx)
+calculate_tiles(struct fd_batch *batch)
 {
+	struct fd_context *ctx = batch->ctx;
 	struct fd_gmem_stateobj *gmem = &ctx->gmem;
-	struct pipe_scissor_state *scissor = &ctx->max_scissor;
-	struct pipe_framebuffer_state *pfb = &ctx->framebuffer;
-	uint32_t gmem_size = ctx->screen->gmemsize_bytes;
+	struct pipe_scissor_state *scissor = &batch->max_scissor;
+	struct pipe_framebuffer_state *pfb = &batch->framebuffer;
+	const uint32_t gmem_alignw = ctx->screen->gmem_alignw;
+	const uint32_t gmem_alignh = ctx->screen->gmem_alignh;
+	const unsigned npipes = ctx->screen->num_vsc_pipes;
+	const uint32_t gmem_size = ctx->screen->gmemsize_bytes;
 	uint32_t minx, miny, width, height;
 	uint32_t nbins_x = 1, nbins_y = 1;
 	uint32_t bin_w, bin_h;
-	uint32_t max_width = bin_width(ctx);
-	uint32_t cpp = 4;
+	uint32_t max_width = bin_width(ctx->screen);
+	uint8_t cbuf_cpp[MAX_RENDER_TARGETS] = {0}, zsbuf_cpp[2] = {0};
 	uint32_t i, j, t, xoff, yoff;
 	uint32_t tpp_x, tpp_y;
-	bool has_zs = !!(ctx->resolve & (FD_BUFFER_DEPTH | FD_BUFFER_STENCIL));
+	bool has_zs = !!(batch->resolve & (FD_BUFFER_DEPTH | FD_BUFFER_STENCIL));
+	int tile_n[npipes];
 
-	if (pfb->cbufs[0])
-		cpp = util_format_get_blocksize(pfb->cbufs[0]->format);
+	if (has_zs) {
+		struct fd_resource *rsc = fd_resource(pfb->zsbuf->texture);
+		zsbuf_cpp[0] = rsc->cpp;
+		if (rsc->stencil)
+			zsbuf_cpp[1] = rsc->stencil->cpp;
+	}
+	for (i = 0; i < pfb->nr_cbufs; i++) {
+		if (pfb->cbufs[i])
+			cbuf_cpp[i] = util_format_get_blocksize(pfb->cbufs[i]->format);
+		else
+			cbuf_cpp[i] = 4;
+	}
 
-	if ((gmem->cpp == cpp) && (gmem->has_zs == has_zs) &&
-			!memcmp(&gmem->scissor, scissor, sizeof(gmem->scissor))) {
+	if (!memcmp(gmem->zsbuf_cpp, zsbuf_cpp, sizeof(zsbuf_cpp)) &&
+			!memcmp(gmem->cbuf_cpp, cbuf_cpp, sizeof(cbuf_cpp)) &&
+			!memcmp(&gmem->scissor, 
scissor, sizeof(gmem->scissor))) { /* everything is up-to-date */ return; } - /* if have depth/stencil, we need to leave room: */ - if (has_zs) { - gmem_size /= 2; - max_width /= 2; - } - if (fd_mesa_debug & FD_DBG_NOSCIS) { minx = 0; miny = 0; width = pfb->width; height = pfb->height; } else { - minx = scissor->minx & ~31; /* round down to multiple of 32 */ - miny = scissor->miny & ~31; + /* round down to multiple of alignment: */ + minx = scissor->minx & ~(gmem_alignw - 1); + miny = scissor->miny & ~(gmem_alignh - 1); width = scissor->maxx - minx; height = scissor->maxy - miny; } - bin_w = align(width, 32); - bin_h = align(height, 32); + bin_w = align(width, gmem_alignw); + bin_h = align(height, gmem_alignh); /* first, find a bin width that satisfies the maximum width * restrictions: */ while (bin_w > max_width) { nbins_x++; - bin_w = align(width / nbins_x, 32); + bin_w = align(width / nbins_x, gmem_alignw); + } + + if (fd_mesa_debug & FD_DBG_MSGS) { + debug_printf("binning input: cbuf cpp:"); + for (i = 0; i < pfb->nr_cbufs; i++) + debug_printf(" %d", cbuf_cpp[i]); + debug_printf(", zsbuf cpp: %d; %dx%d\n", + zsbuf_cpp[0], width, height); } /* then find a bin width/height that satisfies the memory * constraints: */ - while ((bin_w * bin_h * cpp) > gmem_size) { + while (total_size(cbuf_cpp, zsbuf_cpp, bin_w, bin_h, gmem) > gmem_size) { if (bin_w > bin_h) { nbins_x++; - bin_w = align(width / nbins_x, 32); + bin_w = align(width / nbins_x, gmem_alignw); } else { nbins_y++; - bin_h = align(height / nbins_y, 32); + bin_h = align(height / nbins_y, gmem_alignh); } } DBG("using %d bins of size %dx%d", nbins_x*nbins_y, bin_w, bin_h); gmem->scissor = *scissor; - gmem->cpp = cpp; - gmem->has_zs = has_zs; + memcpy(gmem->cbuf_cpp, cbuf_cpp, sizeof(cbuf_cpp)); + memcpy(gmem->zsbuf_cpp, zsbuf_cpp, sizeof(zsbuf_cpp)); gmem->bin_h = bin_h; gmem->bin_w = bin_w; gmem->nbins_x = nbins_x; @@ -174,10 +220,13 @@ calculate_tiles(struct fd_context *ctx) div_round_up(nbins_x, tpp_x)) > 8) tpp_x += 1; + gmem->maxpw = tpp_x; + gmem->maxph = tpp_y; + /* configure pipes: */ xoff = yoff = 0; - for (i = 0; i < ARRAY_SIZE(ctx->pipe); i++) { - struct fd_vsc_pipe *pipe = &ctx->pipe[i]; + for (i = 0; i < npipes; i++) { + struct fd_vsc_pipe *pipe = &ctx->vsc_pipe[i]; if (xoff >= nbins_x) { xoff = 0; @@ -196,8 +245,8 @@ calculate_tiles(struct fd_context *ctx) xoff += tpp_x; } - for (; i < ARRAY_SIZE(ctx->pipe); i++) { - struct fd_vsc_pipe *pipe = &ctx->pipe[i]; + for (; i < npipes; i++) { + struct fd_vsc_pipe *pipe = &ctx->vsc_pipe[i]; pipe->x = pipe->y = pipe->w = pipe->h = 0; } @@ -213,6 +262,7 @@ calculate_tiles(struct fd_context *ctx) /* configure tiles: */ t = 0; yoff = miny; + memset(tile_n, 0, sizeof(tile_n)); for (i = 0; i < nbins_y; i++) { uint32_t bw, bh; @@ -223,20 +273,17 @@ calculate_tiles(struct fd_context *ctx) for (j = 0; j < nbins_x; j++) { struct fd_tile *tile = &ctx->tile[t]; - uint32_t n, p; + uint32_t p; assert(t < ARRAY_SIZE(ctx->tile)); /* pipe number: */ p = ((i / tpp_y) * div_round_up(nbins_x, tpp_x)) + (j / tpp_x); - /* slot number: */ - n = ((i % tpp_y) * tpp_x) + (j % tpp_x); - /* clip bin width: */ bw = MIN2(bin_w, minx + width - xoff); - tile->n = n; + tile->n = tile_n[p]++; tile->p = p; tile->bin_w = bw; tile->bin_h = bh; @@ -264,14 +311,15 @@ calculate_tiles(struct fd_context *ctx) } static void -render_tiles(struct fd_context *ctx) +render_tiles(struct fd_batch *batch) { + struct fd_context *ctx = batch->ctx; struct fd_gmem_stateobj *gmem = &ctx->gmem; int i; - ctx->emit_tile_init(ctx); + 
ctx->emit_tile_init(batch); - if (ctx->restore) + if (batch->restore) ctx->stats.batch_restore++; for (i = 0; i < (gmem->nbins_x * gmem->nbins_y); i++) { @@ -280,109 +328,129 @@ render_tiles(struct fd_context *ctx) DBG("bin_h=%d, yoff=%d, bin_w=%d, xoff=%d", tile->bin_h, tile->yoff, tile->bin_w, tile->xoff); - ctx->emit_tile_prep(ctx, tile); + ctx->emit_tile_prep(batch, tile); - if (ctx->restore) { - fd_hw_query_set_stage(ctx, ctx->ring, FD_STAGE_MEM2GMEM); - ctx->emit_tile_mem2gmem(ctx, tile); - fd_hw_query_set_stage(ctx, ctx->ring, FD_STAGE_NULL); + if (batch->restore) { + ctx->emit_tile_mem2gmem(batch, tile); } - ctx->emit_tile_renderprep(ctx, tile); + ctx->emit_tile_renderprep(batch, tile); - fd_hw_query_prepare_tile(ctx, i, ctx->ring); + if (ctx->query_prepare_tile) + ctx->query_prepare_tile(batch, i, batch->gmem); /* emit IB to drawcmds: */ - OUT_IB(ctx->ring, ctx->draw_start, ctx->draw_end); - fd_reset_wfi(ctx); + ctx->emit_ib(batch->gmem, batch->draw); + fd_reset_wfi(batch); /* emit gmem2mem to transfer tile back to system memory: */ - fd_hw_query_set_stage(ctx, ctx->ring, FD_STAGE_GMEM2MEM); - ctx->emit_tile_gmem2mem(ctx, tile); - fd_hw_query_set_stage(ctx, ctx->ring, FD_STAGE_NULL); + ctx->emit_tile_gmem2mem(batch, tile); } + + if (ctx->emit_tile_fini) + ctx->emit_tile_fini(batch); } static void -render_sysmem(struct fd_context *ctx) +render_sysmem(struct fd_batch *batch) { - ctx->emit_sysmem_prep(ctx); + struct fd_context *ctx = batch->ctx; + + ctx->emit_sysmem_prep(batch); - fd_hw_query_prepare_tile(ctx, 0, ctx->ring); + if (ctx->query_prepare_tile) + ctx->query_prepare_tile(batch, 0, batch->gmem); /* emit IB to drawcmds: */ - OUT_IB(ctx->ring, ctx->draw_start, ctx->draw_end); - fd_reset_wfi(ctx); + ctx->emit_ib(batch->gmem, batch->draw); + fd_reset_wfi(batch); + + if (ctx->emit_sysmem_fini) + ctx->emit_sysmem_fini(batch); +} + +static void +flush_ring(struct fd_batch *batch) +{ + /* for compute/blit batch, there is no batch->gmem, only batch->draw: */ + struct fd_ringbuffer *ring = batch->nondraw ? batch->draw : batch->gmem; + uint32_t timestamp; + int out_fence_fd = -1; + + fd_ringbuffer_flush2(ring, batch->in_fence_fd, + batch->needs_out_fence_fd ? 
&out_fence_fd : NULL); + + timestamp = fd_ringbuffer_timestamp(ring); + fd_fence_populate(batch->fence, timestamp, out_fence_fd); } void -fd_gmem_render_tiles(struct fd_context *ctx) +fd_gmem_render_tiles(struct fd_batch *batch) { - struct pipe_framebuffer_state *pfb = &ctx->framebuffer; - uint32_t timestamp = 0; + struct fd_context *ctx = batch->ctx; + struct pipe_framebuffer_state *pfb = &batch->framebuffer; bool sysmem = false; - if (ctx->emit_sysmem_prep) { - if (ctx->cleared || ctx->gmem_reason || (ctx->num_draws > 5)) { + if (ctx->emit_sysmem_prep && !batch->nondraw) { + if (batch->cleared || batch->gmem_reason || + ((batch->num_draws > 5) && !batch->blit)) { DBG("GMEM: cleared=%x, gmem_reason=%x, num_draws=%u", - ctx->cleared, ctx->gmem_reason, ctx->num_draws); + batch->cleared, batch->gmem_reason, batch->num_draws); } else if (!(fd_mesa_debug & FD_DBG_NOBYPASS)) { sysmem = true; } - } - - /* close out the draw cmds by making sure any active queries are - * paused: - */ - fd_hw_query_set_stage(ctx, ctx->ring, FD_STAGE_NULL); - /* mark the end of the clear/draw cmds before emitting per-tile cmds: */ - fd_ringmarker_mark(ctx->draw_end); - fd_ringmarker_mark(ctx->binning_end); + /* For ARB_framebuffer_no_attachments: */ + if ((pfb->nr_cbufs == 0) && !pfb->zsbuf) { + sysmem = true; + } + } - fd_reset_wfi(ctx); + fd_reset_wfi(batch); ctx->stats.batch_total++; - if (sysmem) { - DBG("rendering sysmem (%s/%s)", + if (batch->nondraw) { + DBG("%p: rendering non-draw", batch); + ctx->stats.batch_nondraw++; + } else if (sysmem) { + DBG("%p: rendering sysmem %ux%u (%s/%s)", + batch, pfb->width, pfb->height, util_format_short_name(pipe_surface_format(pfb->cbufs[0])), util_format_short_name(pipe_surface_format(pfb->zsbuf))); - fd_hw_query_prepare(ctx, 1); - render_sysmem(ctx); + if (ctx->query_prepare) + ctx->query_prepare(batch, 1); + render_sysmem(batch); ctx->stats.batch_sysmem++; } else { struct fd_gmem_stateobj *gmem = &ctx->gmem; - calculate_tiles(ctx); - DBG("rendering %dx%d tiles (%s/%s)", gmem->nbins_x, gmem->nbins_y, + calculate_tiles(batch); + DBG("%p: rendering %dx%d tiles %ux%u (%s/%s)", + batch, pfb->width, pfb->height, gmem->nbins_x, gmem->nbins_y, util_format_short_name(pipe_surface_format(pfb->cbufs[0])), util_format_short_name(pipe_surface_format(pfb->zsbuf))); - fd_hw_query_prepare(ctx, gmem->nbins_x * gmem->nbins_y); - render_tiles(ctx); + if (ctx->query_prepare) + ctx->query_prepare(batch, gmem->nbins_x * gmem->nbins_y); + render_tiles(batch); ctx->stats.batch_gmem++; } - /* GPU executes starting from tile cmds, which IB back to draw cmds: */ - fd_ringmarker_flush(ctx->draw_end); - - /* mark start for next draw/binning cmds: */ - fd_ringmarker_mark(ctx->draw_start); - fd_ringmarker_mark(ctx->binning_start); - - fd_reset_wfi(ctx); - - /* update timestamps on render targets: */ - timestamp = fd_ringbuffer_timestamp(ctx->ring); - if (pfb->cbufs[0]) - fd_resource(pfb->cbufs[0]->texture)->timestamp = timestamp; - if (pfb->zsbuf) - fd_resource(pfb->zsbuf->texture)->timestamp = timestamp; + flush_ring(batch); +} - /* reset maximal bounds: */ - ctx->max_scissor.minx = ctx->max_scissor.miny = ~0; - ctx->max_scissor.maxx = ctx->max_scissor.maxy = 0; +/* special case for when we need to create a fence but have no rendering + * to flush.. just emit a no-op string-marker packet. 
+ */ +void +fd_gmem_render_noop(struct fd_batch *batch) +{ + struct fd_context *ctx = batch->ctx; + struct pipe_context *pctx = &ctx->base; - ctx->dirty = ~0; + pctx->emit_string_marker(pctx, "noop", 4); + /* emit IB to drawcmds (which contain the string marker): */ + ctx->emit_ib(batch->gmem, batch->draw); + flush_ring(batch); } /* tile needs restore if it isn't completely contained within the @@ -405,26 +473,26 @@ skip_restore(struct pipe_scissor_state *scissor, struct fd_tile *tile) * case would be a single clear. */ bool -fd_gmem_needs_restore(struct fd_context *ctx, struct fd_tile *tile, +fd_gmem_needs_restore(struct fd_batch *batch, struct fd_tile *tile, uint32_t buffers) { - if (!(ctx->restore & buffers)) + if (!(batch->restore & buffers)) return false; /* if buffers partially cleared, then slow-path to figure out * if this particular tile needs restoring: */ if ((buffers & FD_BUFFER_COLOR) && - (ctx->partial_cleared & FD_BUFFER_COLOR) && - skip_restore(&ctx->cleared_scissor.color, tile)) + (batch->partial_cleared & FD_BUFFER_COLOR) && + skip_restore(&batch->cleared_scissor.color, tile)) return false; if ((buffers & FD_BUFFER_DEPTH) && - (ctx->partial_cleared & FD_BUFFER_DEPTH) && - skip_restore(&ctx->cleared_scissor.depth, tile)) + (batch->partial_cleared & FD_BUFFER_DEPTH) && + skip_restore(&batch->cleared_scissor.depth, tile)) return false; if ((buffers & FD_BUFFER_STENCIL) && - (ctx->partial_cleared & FD_BUFFER_STENCIL) && - skip_restore(&ctx->cleared_scissor.stencil, tile)) + (batch->partial_cleared & FD_BUFFER_STENCIL) && + skip_restore(&batch->cleared_scissor.stencil, tile)) return false; return true;
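
For readers following the new GMEM layout code, here is a minimal, self-contained C sketch of the bin-sizing logic above: starting from a single bin covering the framebuffer, the wider axis is split until the combined per-bin footprint of all color and depth/stencil buffers, with each buffer's base offset aligned to 0x4000 as in total_size(), fits into GMEM. GMEM_SIZE, GMEM_ALIGN, MAX_RT and align_up() are illustrative stand-ins, not driver names; the real driver reads gmemsize_bytes and gmem_alignw/gmem_alignh from fd_screen, and also caps bin_w at bin_width() before applying the memory constraint.

#include <stdint.h>
#include <stdio.h>

#define MAX_RT     8             /* stand-in for MAX_RENDER_TARGETS */
#define GMEM_SIZE  (512 * 1024)  /* assumed on-chip GMEM size in bytes */
#define GMEM_ALIGN 32            /* assumed bin alignment */

static uint32_t align_up(uint32_t v, uint32_t a)
{
	return (v + (a - 1)) & ~(a - 1);
}

/* per-bin footprint of all buffers, each base aligned to 0x4000,
 * mirroring total_size() (without recording the base offsets) */
static uint32_t bin_footprint(const uint8_t cbuf_cpp[MAX_RT],
		const uint8_t zsbuf_cpp[2], uint32_t bin_w, uint32_t bin_h)
{
	uint32_t total = 0;
	for (int i = 0; i < MAX_RT; i++)
		if (cbuf_cpp[i])
			total = align_up(total, 0x4000) + cbuf_cpp[i] * bin_w * bin_h;
	for (int i = 0; i < 2; i++)
		if (zsbuf_cpp[i])
			total = align_up(total, 0x4000) + zsbuf_cpp[i] * bin_w * bin_h;
	return total;
}

int main(void)
{
	/* example: one RGBA8 color buffer plus 32bpp depth, no separate stencil */
	uint8_t cbuf_cpp[MAX_RT] = { 4 };
	uint8_t zsbuf_cpp[2] = { 4, 0 };
	uint32_t width = 1920, height = 1080;
	uint32_t nbins_x = 1, nbins_y = 1;
	uint32_t bin_w = align_up(width, GMEM_ALIGN);
	uint32_t bin_h = align_up(height, GMEM_ALIGN);

	/* split the wider axis first until everything fits, as in the
	 * memory-constraint loop in calculate_tiles() */
	while (bin_footprint(cbuf_cpp, zsbuf_cpp, bin_w, bin_h) > GMEM_SIZE) {
		if (bin_w > bin_h) {
			nbins_x++;
			bin_w = align_up(width / nbins_x, GMEM_ALIGN);
		} else {
			nbins_y++;
			bin_h = align_up(height / nbins_y, GMEM_ALIGN);
		}
	}

	printf("%ux%u bins of %ux%u\n", nbins_x, nbins_y, bin_w, bin_h);
	return 0;
}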
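The tile numbering change in calculate_tiles() is also easy to see in isolation: the pipe index p is still derived from the bin's position, but the slot number within a pipe is now a running per-pipe counter (tile->n = tile_n[p]++) rather than the position-derived ((i % tpp_y) * tpp_x) + (j % tpp_x), which keeps slot numbers dense when the bin grid does not divide evenly into pipes. A small sketch; the 5x3 grid and 2x2 tiles-per-pipe values are made up for illustration:

#include <stdint.h>
#include <stdio.h>

static uint32_t div_round_up(uint32_t n, uint32_t d)
{
	return (n + d - 1) / d;
}

int main(void)
{
	uint32_t nbins_x = 5, nbins_y = 3;  /* example bin grid */
	uint32_t tpp_x = 2, tpp_y = 2;      /* tiles-per-pipe, width and height */
	int tile_n[32] = { 0 };             /* per-pipe slot counters, as in the diff */

	for (uint32_t i = 0; i < nbins_y; i++) {
		for (uint32_t j = 0; j < nbins_x; j++) {
			/* pipe number: same formula as calculate_tiles() */
			uint32_t p = ((i / tpp_y) * div_round_up(nbins_x, tpp_x)) + (j / tpp_x);
			/* slot number: next free slot within that pipe (new scheme) */
			int n = tile_n[p]++;
			printf("bin (%u,%u) -> pipe %u, slot %d\n", j, i, p, n);
		}
	}
	return 0;
}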
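Finally, the restore logic at the end: fd_gmem_needs_restore() lets a tile skip its mem2gmem restore when a partial clear's scissor completely covers it, since the clear will overwrite every pixel anyway. skip_restore() itself is not part of this diff, so the following containment test is only a plausible sketch, with hypothetical struct stand-ins for pipe_scissor_state and fd_tile:

#include <stdint.h>
#include <stdbool.h>

struct scissor { uint32_t minx, miny, maxx, maxy; };
struct tile    { uint32_t xoff, yoff, bin_w, bin_h; };

/* true when the cleared scissor fully covers the tile, so the
 * restore (mem2gmem) for this tile can be skipped */
static bool tile_fully_cleared(const struct scissor *s, const struct tile *t)
{
	return s->minx <= t->xoff &&
	       s->miny <= t->yoff &&
	       s->maxx >= t->xoff + t->bin_w &&
	       s->maxy >= t->yoff + t->bin_h;
}

int main(void)
{
	struct scissor clear = { 0, 0, 1920, 1080 };  /* full-surface clear */
	struct tile t = { 256, 224, 256, 224 };
	return tile_fully_cleared(&clear, &t) ? 0 : 1;
}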