Add a gmem_state pointer to fd_batch and switch the per-generation gmem code
over to it instead of dereferencing ctx->gmem directly. Prep work to reduce
churn in the next patch.
Signed-off-by: Rob Clark <robdclark@chromium.org>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/merge_requests/3503>
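The gist of the change, as a minimal sketch (fragments only, using names that
appear in the diff below; the actual lookup of the gmem state is left to the
follow-up patch):

    /* freedreno_batch.h: the batch records which gmem layout it will be
     * rendered with:
     */
    const struct fd_gmem_stateobj *gmem_state;

    /* freedreno_gmem.c: filled in once we commit to GMEM rendering; for
     * now it still points at the context's copy:
     */
    struct fd_gmem_stateobj *gmem = &ctx->gmem;
    batch->gmem_state = gmem;

    /* per-generation code then reads the batch's pointer rather than
     * reaching into the context:
     */
    const struct fd_gmem_stateobj *gmem = batch->gmem_state;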
static bool
use_hw_binning(struct fd_batch *batch)
{
- const struct fd_gmem_stateobj *gmem = &batch->ctx->gmem;
+ const struct fd_gmem_stateobj *gmem = batch->gmem_state;
/* we hardcoded a limit of 8 "pipes", we can increase this limit
* at the cost of a slightly larger command stream
{
struct fd_context *ctx = batch->ctx;
struct fd2_context *fd2_ctx = fd2_context(ctx);
- const struct fd_gmem_stateobj *gmem = &ctx->gmem;
+ const struct fd_gmem_stateobj *gmem = batch->gmem_state;
struct pipe_framebuffer_state *pfb = &batch->framebuffer;
struct fd_ringbuffer *ring;
{
struct fd_context *ctx = batch->ctx;
struct fd2_context *fd2_ctx = fd2_context(ctx);
- const struct fd_gmem_stateobj *gmem = &ctx->gmem;
+ const struct fd_gmem_stateobj *gmem = batch->gmem_state;
struct fd_ringbuffer *ring = batch->gmem;
struct pipe_framebuffer_state *pfb = &batch->framebuffer;
unsigned bin_w = tile->bin_w;
struct fd_context *ctx = batch->ctx;
struct fd_ringbuffer *ring = batch->gmem;
struct pipe_framebuffer_state *pfb = &batch->framebuffer;
- const struct fd_gmem_stateobj *gmem = &ctx->gmem;
+ const struct fd_gmem_stateobj *gmem = batch->gmem_state;
enum pipe_format format = pipe_surface_format(pfb->cbufs[0]);
uint32_t reg;
static bool
use_hw_binning(struct fd_batch *batch)
{
- const struct fd_gmem_stateobj *gmem = &batch->ctx->gmem;
+ const struct fd_gmem_stateobj *gmem = batch->gmem_state;
/* workaround: combining scissor optimization and hw binning
* seems problematic. Seems like we end up with a mismatch
emit_binning_workaround(struct fd_batch *batch)
{
struct fd_context *ctx = batch->ctx;
- const struct fd_gmem_stateobj *gmem = &ctx->gmem;
+ const struct fd_gmem_stateobj *gmem = batch->gmem_state;
struct fd_ringbuffer *ring = batch->gmem;
struct fd3_emit emit = {
.debug = &ctx->debug,
{
struct fd_context *ctx = batch->ctx;
struct fd_ringbuffer *ring = batch->gmem;
+ const struct fd_gmem_stateobj *gmem = batch->gmem_state;
struct pipe_framebuffer_state *pfb = &batch->framebuffer;
struct fd3_emit emit = {
.debug = &ctx->debug,
OUT_RING(ring, A3XX_RB_RENDER_CONTROL_DISABLE_COLOR_PIPE |
A3XX_RB_RENDER_CONTROL_ENABLE_GMEM |
A3XX_RB_RENDER_CONTROL_ALPHA_TEST_FUNC(FUNC_NEVER) |
- A3XX_RB_RENDER_CONTROL_BIN_WIDTH(ctx->gmem.bin_w));
+ A3XX_RB_RENDER_CONTROL_BIN_WIDTH(batch->gmem_state->bin_w));
OUT_PKT0(ring, REG_A3XX_GRAS_SC_CONTROL, 1);
OUT_RING(ring, A3XX_GRAS_SC_CONTROL_RENDER_MODE(RB_RESOLVE_PASS) |
struct fd_resource *rsc = fd_resource(pfb->zsbuf->texture);
if (!rsc->stencil || batch->resolve & FD_BUFFER_DEPTH)
emit_gmem2mem_surf(batch, RB_COPY_DEPTH_STENCIL, false,
- ctx->gmem.zsbuf_base[0], pfb->zsbuf);
+ gmem->zsbuf_base[0], pfb->zsbuf);
if (rsc->stencil && batch->resolve & FD_BUFFER_STENCIL)
emit_gmem2mem_surf(batch, RB_COPY_DEPTH_STENCIL, true,
- ctx->gmem.zsbuf_base[1], pfb->zsbuf);
+ gmem->zsbuf_base[1], pfb->zsbuf);
}
if (batch->resolve & FD_BUFFER_COLOR) {
if (!(batch->resolve & (PIPE_CLEAR_COLOR0 << i)))
continue;
emit_gmem2mem_surf(batch, RB_COPY_RESOLVE, false,
- ctx->gmem.cbuf_base[i], pfb->cbufs[i]);
+ gmem->cbuf_base[i], pfb->cbufs[i]);
}
}
OUT_PKT0(ring, REG_A3XX_RB_DEPTH_INFO, 2);
OUT_RING(ring, A3XX_RB_DEPTH_INFO_DEPTH_BASE(bases[0]) |
A3XX_RB_DEPTH_INFO_DEPTH_FORMAT(DEPTHX_32));
- OUT_RING(ring, A3XX_RB_DEPTH_PITCH(4 * batch->ctx->gmem.bin_w));
+ OUT_RING(ring, A3XX_RB_DEPTH_PITCH(4 * batch->gmem_state->bin_w));
if (psurf[0]->format == PIPE_FORMAT_Z32_FLOAT) {
OUT_PKT0(ring, REG_A3XX_RB_MRT_CONTROL(0), 1);
fd3_emit_tile_mem2gmem(struct fd_batch *batch, const struct fd_tile *tile)
{
struct fd_context *ctx = batch->ctx;
- const struct fd_gmem_stateobj *gmem = &ctx->gmem;
+ const struct fd_gmem_stateobj *gmem = batch->gmem_state;
struct fd_ringbuffer *ring = batch->gmem;
struct pipe_framebuffer_state *pfb = &batch->framebuffer;
struct fd3_emit emit = {
update_vsc_pipe(struct fd_batch *batch)
{
struct fd_context *ctx = batch->ctx;
- const struct fd_gmem_stateobj *gmem = &ctx->gmem;
+ const struct fd_gmem_stateobj *gmem = batch->gmem_state;
struct fd3_context *fd3_ctx = fd3_context(ctx);
struct fd_ringbuffer *ring = batch->gmem;
int i;
emit_binning_pass(struct fd_batch *batch)
{
struct fd_context *ctx = batch->ctx;
- const struct fd_gmem_stateobj *gmem = &ctx->gmem;
+ const struct fd_gmem_stateobj *gmem = batch->gmem_state;
struct pipe_framebuffer_state *pfb = &batch->framebuffer;
struct fd_ringbuffer *ring = batch->gmem;
int i;
{
struct fd_ringbuffer *ring = batch->gmem;
struct pipe_framebuffer_state *pfb = &batch->framebuffer;
- const struct fd_gmem_stateobj *gmem = &batch->ctx->gmem;
+ const struct fd_gmem_stateobj *gmem = batch->gmem_state;
uint32_t rb_render_control;
fd3_emit_restore(batch, ring);
struct fd_context *ctx = batch->ctx;
struct fd3_context *fd3_ctx = fd3_context(ctx);
struct fd_ringbuffer *ring = batch->gmem;
- const struct fd_gmem_stateobj *gmem = &ctx->gmem;
+ const struct fd_gmem_stateobj *gmem = batch->gmem_state;
struct pipe_framebuffer_state *pfb = &batch->framebuffer;
uint32_t x1 = tile->xoff;
static bool
use_hw_binning(struct fd_batch *batch)
{
- const struct fd_gmem_stateobj *gmem = &batch->ctx->gmem;
+ const struct fd_gmem_stateobj *gmem = batch->gmem_state;
if ((gmem->maxpw * gmem->maxph) > 32)
return false;
fd4_emit_tile_gmem2mem(struct fd_batch *batch, const struct fd_tile *tile)
{
struct fd_context *ctx = batch->ctx;
- const struct fd_gmem_stateobj *gmem = &ctx->gmem;
+ const struct fd_gmem_stateobj *gmem = batch->gmem_state;
struct fd_ringbuffer *ring = batch->gmem;
struct pipe_framebuffer_state *pfb = &batch->framebuffer;
struct fd4_emit emit = {
if (batch->resolve & (FD_BUFFER_DEPTH | FD_BUFFER_STENCIL)) {
struct fd_resource *rsc = fd_resource(pfb->zsbuf->texture);
if (!rsc->stencil || (batch->resolve & FD_BUFFER_DEPTH))
- emit_gmem2mem_surf(batch, false, ctx->gmem.zsbuf_base[0], pfb->zsbuf);
+ emit_gmem2mem_surf(batch, false, gmem->zsbuf_base[0], pfb->zsbuf);
if (rsc->stencil && (batch->resolve & FD_BUFFER_STENCIL))
- emit_gmem2mem_surf(batch, true, ctx->gmem.zsbuf_base[1], pfb->zsbuf);
+ emit_gmem2mem_surf(batch, true, gmem->zsbuf_base[1], pfb->zsbuf);
}
if (batch->resolve & FD_BUFFER_COLOR) {
fd4_emit_tile_mem2gmem(struct fd_batch *batch, const struct fd_tile *tile)
{
struct fd_context *ctx = batch->ctx;
- const struct fd_gmem_stateobj *gmem = &ctx->gmem;
+ const struct fd_gmem_stateobj *gmem = batch->gmem_state;
struct fd_ringbuffer *ring = batch->gmem;
struct pipe_framebuffer_state *pfb = &batch->framebuffer;
struct fd4_emit emit = {
update_vsc_pipe(struct fd_batch *batch)
{
struct fd_context *ctx = batch->ctx;
- const struct fd_gmem_stateobj *gmem = &ctx->gmem;
+ const struct fd_gmem_stateobj *gmem = batch->gmem_state;
struct fd4_context *fd4_ctx = fd4_context(ctx);
struct fd_ringbuffer *ring = batch->gmem;
int i;
static void
emit_binning_pass(struct fd_batch *batch)
{
- struct fd_context *ctx = batch->ctx;
- const struct fd_gmem_stateobj *gmem = &ctx->gmem;
+ const struct fd_gmem_stateobj *gmem = batch->gmem_state;
struct pipe_framebuffer_state *pfb = &batch->framebuffer;
struct fd_ringbuffer *ring = batch->gmem;
int i;
{
struct fd_ringbuffer *ring = batch->gmem;
struct pipe_framebuffer_state *pfb = &batch->framebuffer;
- const struct fd_gmem_stateobj *gmem = &batch->ctx->gmem;
+ const struct fd_gmem_stateobj *gmem = batch->gmem_state;
fd4_emit_restore(batch, ring);
static void
fd4_emit_tile_prep(struct fd_batch *batch, const struct fd_tile *tile)
{
- struct fd_context *ctx = batch->ctx;
struct fd_ringbuffer *ring = batch->gmem;
struct pipe_framebuffer_state *pfb = &batch->framebuffer;
- const struct fd_gmem_stateobj *gmem = &ctx->gmem;
+ const struct fd_gmem_stateobj *gmem = batch->gmem_state;
if (pfb->zsbuf) {
struct fd_resource *rsc = fd_resource(pfb->zsbuf->texture);
struct fd_context *ctx = batch->ctx;
struct fd4_context *fd4_ctx = fd4_context(ctx);
struct fd_ringbuffer *ring = batch->gmem;
- const struct fd_gmem_stateobj *gmem = &ctx->gmem;
+ const struct fd_gmem_stateobj *gmem = batch->gmem_state;
struct pipe_framebuffer_state *pfb = &batch->framebuffer;
uint32_t x1 = tile->xoff;
static bool
use_hw_binning(struct fd_batch *batch)
{
- const struct fd_gmem_stateobj *gmem = &batch->ctx->gmem;
+ const struct fd_gmem_stateobj *gmem = batch->gmem_state;
if ((gmem->maxpw * gmem->maxph) > 32)
return false;
{
struct fd_context *ctx = batch->ctx;
struct fd5_context *fd5_ctx = fd5_context(ctx);
- const struct fd_gmem_stateobj *gmem = &batch->ctx->gmem;
+ const struct fd_gmem_stateobj *gmem = batch->gmem_state;
struct fd_ringbuffer *ring = batch->gmem;
int i;
{
struct fd_context *ctx = batch->ctx;
struct fd_ringbuffer *ring = batch->gmem;
- const struct fd_gmem_stateobj *gmem = &batch->ctx->gmem;
+ const struct fd_gmem_stateobj *gmem = batch->gmem_state;
uint32_t x1 = gmem->minx;
uint32_t y1 = gmem->miny;
static void
fd5_emit_tile_init(struct fd_batch *batch)
{
- struct fd_context *ctx = batch->ctx;
struct fd_ringbuffer *ring = batch->gmem;
struct pipe_framebuffer_state *pfb = &batch->framebuffer;
OUT_PKT4(ring, REG_A5XX_RB_CCU_CNTL, 1);
OUT_RING(ring, 0x7c13c080); /* RB_CCU_CNTL */
- emit_zs(ring, pfb->zsbuf, &ctx->gmem);
- emit_mrt(ring, pfb->nr_cbufs, pfb->cbufs, &ctx->gmem);
+ emit_zs(ring, pfb->zsbuf, batch->gmem_state);
+ emit_mrt(ring, pfb->nr_cbufs, pfb->cbufs, batch->gmem_state);
if (use_hw_binning(batch)) {
emit_binning_pass(batch);
fd5_emit_tile_prep(struct fd_batch *batch, const struct fd_tile *tile)
{
struct fd_context *ctx = batch->ctx;
- const struct fd_gmem_stateobj *gmem = &ctx->gmem;
+ const struct fd_gmem_stateobj *gmem = batch->gmem_state;
struct fd5_context *fd5_ctx = fd5_context(ctx);
struct fd_ringbuffer *ring = batch->gmem;
struct pipe_surface *psurf, enum a5xx_blit_buf buf)
{
struct fd_ringbuffer *ring = batch->gmem;
- const struct fd_gmem_stateobj *gmem = &batch->ctx->gmem;
+ const struct fd_gmem_stateobj *gmem = batch->gmem_state;
struct fd_resource *rsc = fd_resource(psurf->texture);
uint32_t stride, size;
fd5_emit_tile_mem2gmem(struct fd_batch *batch, const struct fd_tile *tile)
{
struct fd_ringbuffer *ring = batch->gmem;
- struct fd_context *ctx = batch->ctx;
- const struct fd_gmem_stateobj *gmem = &ctx->gmem;
+ const struct fd_gmem_stateobj *gmem = batch->gmem_state;
struct pipe_framebuffer_state *pfb = &batch->framebuffer;
/*
fd5_emit_tile_renderprep(struct fd_batch *batch, const struct fd_tile *tile)
{
struct fd_ringbuffer *ring = batch->gmem;
- const struct fd_gmem_stateobj *gmem = &batch->ctx->gmem;
+ const struct fd_gmem_stateobj *gmem = batch->gmem_state;
struct pipe_framebuffer_state *pfb = &batch->framebuffer;
OUT_PKT4(ring, REG_A5XX_RB_CNTL, 1);
static void
fd5_emit_tile_gmem2mem(struct fd_batch *batch, const struct fd_tile *tile)
{
- struct fd_context *ctx = batch->ctx;
- const struct fd_gmem_stateobj *gmem = &ctx->gmem;
+ const struct fd_gmem_stateobj *gmem = batch->gmem_state;
struct pipe_framebuffer_state *pfb = &batch->framebuffer;
if (batch->resolve & (FD_BUFFER_DEPTH | FD_BUFFER_STENCIL)) {
static bool
use_hw_binning(struct fd_batch *batch)
{
- const struct fd_gmem_stateobj *gmem = &batch->ctx->gmem;
+ const struct fd_gmem_stateobj *gmem = batch->gmem_state;
// TODO figure out hw limits for binning
static void
patch_fb_read(struct fd_batch *batch)
{
- const struct fd_gmem_stateobj *gmem = &batch->ctx->gmem;
+ const struct fd_gmem_stateobj *gmem = batch->gmem_state;
for (unsigned i = 0; i < fd_patch_num_elements(&batch->fb_read_patches); i++) {
struct fd_cs_patch *patch = fd_patch_element(&batch->fb_read_patches, i);
{
struct fd_context *ctx = batch->ctx;
struct fd6_context *fd6_ctx = fd6_context(ctx);
- const struct fd_gmem_stateobj *gmem = &ctx->gmem;
+ const struct fd_gmem_stateobj *gmem = batch->gmem_state;
struct fd_ringbuffer *ring = batch->gmem;
int i;
emit_vsc_overflow_test(struct fd_batch *batch)
{
struct fd_ringbuffer *ring = batch->gmem;
- const struct fd_gmem_stateobj *gmem = &batch->ctx->gmem;
+ const struct fd_gmem_stateobj *gmem = batch->gmem_state;
struct fd6_context *fd6_ctx = fd6_context(batch->ctx);
debug_assert((fd6_ctx->vsc_data_pitch & 0x3) == 0);
emit_binning_pass(struct fd_batch *batch)
{
struct fd_ringbuffer *ring = batch->gmem;
- const struct fd_gmem_stateobj *gmem = &batch->ctx->gmem;
+ const struct fd_gmem_stateobj *gmem = batch->gmem_state;
struct fd6_context *fd6_ctx = fd6_context(batch->ctx);
uint32_t x1 = gmem->minx;
struct fd_context *ctx = batch->ctx;
struct fd_ringbuffer *ring = batch->gmem;
struct pipe_framebuffer_state *pfb = &batch->framebuffer;
- const struct fd_gmem_stateobj *gmem = &batch->ctx->gmem;
+ const struct fd_gmem_stateobj *gmem = batch->gmem_state;
fd6_emit_restore(batch, ring);
OUT_PKT4(ring, REG_A6XX_RB_CCU_CNTL, 1);
OUT_RING(ring, fd6_context(ctx)->magic.RB_CCU_CNTL_gmem);
- emit_zs(ring, pfb->zsbuf, &ctx->gmem);
- emit_mrt(ring, pfb, &ctx->gmem);
+ emit_zs(ring, pfb->zsbuf, batch->gmem_state);
+ emit_mrt(ring, pfb, batch->gmem_state);
emit_msaa(ring, pfb->samples);
patch_fb_read(batch);
fd6_emit_tile_prep(struct fd_batch *batch, const struct fd_tile *tile)
{
struct fd_context *ctx = batch->ctx;
- const struct fd_gmem_stateobj *gmem = &ctx->gmem;
+ const struct fd_gmem_stateobj *gmem = batch->gmem_state;
struct fd6_context *fd6_ctx = fd6_context(ctx);
struct fd_ringbuffer *ring = batch->gmem;
set_window_offset(ring, x1, y1);
- const struct fd_gmem_stateobj *gmem = &batch->ctx->gmem;
+ const struct fd_gmem_stateobj *gmem = batch->gmem_state;
set_bin_size(ring, gmem->bin_w, gmem->bin_h, 0x6000000);
OUT_PKT7(ring, CP_SET_MODE, 1);
emit_clears(struct fd_batch *batch, struct fd_ringbuffer *ring)
{
struct pipe_framebuffer_state *pfb = &batch->framebuffer;
- const struct fd_gmem_stateobj *gmem = &batch->ctx->gmem;
+ const struct fd_gmem_stateobj *gmem = batch->gmem_state;
enum a3xx_msaa_samples samples = fd_msaa_samples(pfb->samples);
uint32_t buffers = batch->fast_cleared;
static void
emit_restore_blits(struct fd_batch *batch, struct fd_ringbuffer *ring)
{
- struct fd_context *ctx = batch->ctx;
- const struct fd_gmem_stateobj *gmem = &ctx->gmem;
+ const struct fd_gmem_stateobj *gmem = batch->gmem_state;
struct pipe_framebuffer_state *pfb = &batch->framebuffer;
if (batch->restore & FD_BUFFER_COLOR) {
static void
prepare_tile_fini_ib(struct fd_batch *batch)
{
- struct fd_context *ctx = batch->ctx;
- const struct fd_gmem_stateobj *gmem = &ctx->gmem;
+ const struct fd_gmem_stateobj *gmem = batch->gmem_state;
struct pipe_framebuffer_state *pfb = &batch->framebuffer;
struct fd_ringbuffer *ring;
FD_GMEM_LOGICOP_ENABLED = 0x20,
FD_GMEM_FB_READ = 0x40,
} gmem_reason;
+
+ /* At submit time, once we've decided that this batch will use GMEM
+ * rendering, the appropriate gmem state is looked up:
+ */
+ const struct fd_gmem_stateobj *gmem_state;
+
unsigned num_draws; /* number of draws in current batch */
unsigned num_vertices; /* number of vertices in current batch */
}
static void
-render_tiles(struct fd_batch *batch)
+render_tiles(struct fd_batch *batch, struct fd_gmem_stateobj *gmem)
{
struct fd_context *ctx = batch->ctx;
- struct fd_gmem_stateobj *gmem = &ctx->gmem;
int i;
ctx->emit_tile_init(batch);
ctx->stats.batch_sysmem++;
} else {
struct fd_gmem_stateobj *gmem = &ctx->gmem;
+ batch->gmem_state = gmem;
calculate_tiles(batch);
DBG("%p: rendering %dx%d tiles %ux%u (%s/%s)",
batch, pfb->width, pfb->height, gmem->nbins_x, gmem->nbins_y,
util_format_short_name(pipe_surface_format(pfb->zsbuf)));
if (ctx->query_prepare)
ctx->query_prepare(batch, gmem->nbins_x * gmem->nbins_y);
- render_tiles(batch);
+ render_tiles(batch, gmem);
ctx->stats.batch_gmem++;
}