* Rob Clark <robclark@freedesktop.org>
*/
+#include "freedreno_query_hw.h"
#include "fd3_context.h"
#include "fd3_blend.h"
u_upload_destroy(fd3_ctx->border_color_uploader);
+ fd_hw_query_fini(pctx);
+
fd_context_destroy(pctx);
}
if (!pctx)
return NULL;
+ fd_hw_query_init(pctx);
+
fd3_ctx->vs_pvt_mem = fd_bo_new(screen->dev, 0x2000,
DRM_FREEDRENO_GEM_TYPE_KMEM);
void fd3_query_context_init(struct pipe_context *pctx)
{
+ struct fd_context *ctx = fd_context(pctx);
+
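+ /* hook the generic hw-query machinery in as this generation's
+ * query implementation:
+ */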
+ ctx->create_query = fd_hw_create_query;
+ ctx->query_prepare = fd_hw_query_prepare;
+ ctx->query_prepare_tile = fd_hw_query_prepare_tile;
+ ctx->query_set_stage = fd_hw_query_set_stage;
+
fd_hw_query_register_provider(pctx, &occlusion_counter);
fd_hw_query_register_provider(pctx, &occlusion_predicate);
}
* Rob Clark <robclark@freedesktop.org>
*/
+#include "freedreno_query_hw.h"
#include "fd4_context.h"
#include "fd4_blend.h"
u_upload_destroy(fd4_ctx->border_color_uploader);
+ fd_hw_query_fini(pctx);
+
fd_context_destroy(pctx);
}
if (!pctx)
return NULL;
+ fd_hw_query_init(pctx);
+
fd4_ctx->vs_pvt_mem = fd_bo_new(screen->dev, 0x2000,
DRM_FREEDRENO_GEM_TYPE_KMEM);
void fd4_query_context_init(struct pipe_context *pctx)
{
+ struct fd_context *ctx = fd_context(pctx);
+
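+ /* hook the generic hw-query machinery in as this generation's
+ * query implementation:
+ */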
+ ctx->create_query = fd_hw_create_query;
+ ctx->query_prepare = fd_hw_query_prepare;
+ ctx->query_prepare_tile = fd_hw_query_prepare_tile;
+ ctx->query_set_stage = fd_hw_query_set_stage;
+
fd_hw_query_register_provider(pctx, &occlusion_counter);
fd_hw_query_register_provider(pctx, &occlusion_predicate);
fd_hw_query_register_provider(pctx, &time_elapsed);
/* TODO hacks.. these should not be hardcoded: */
OUT_PKT4(ring, REG_A5XX_GRAS_SC_CNTL, 1);
OUT_RING(ring, 0x00000008); /* GRAS_SC_CNTL */
-
- fd_hw_query_enable(batch, ring);
}
static void
/* close out the draw cmds by making sure any active queries are
* paused:
*/
- fd_hw_query_set_stage(batch, batch->draw, FD_STAGE_NULL);
+ fd_batch_set_stage(batch, batch->draw, FD_STAGE_NULL);
fd_context_all_dirty(batch->ctx);
batch_flush_reset_dependencies(batch, true);
fd_fence_ref(pctx->screen, &ctx->last_fence, NULL);
fd_prog_fini(pctx);
- fd_hw_query_fini(pctx);
if (ctx->blitter)
util_blitter_destroy(ctx->blitter);
fd_query_context_init(pctx);
fd_texture_init(pctx);
fd_state_init(pctx);
- fd_hw_query_init(pctx);
ctx->blitter = util_blitter_create(pctx);
if (!ctx->blitter)
/* indirect-branch emit: */
void (*emit_ib)(struct fd_ringbuffer *ring, struct fd_ringbuffer *target);
+ /* query: */
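+ /* these are optional; generations without hw-query support leave
+ * them NULL, so callers must null-check before calling:
+ */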
+ struct fd_query * (*create_query)(struct fd_context *ctx, unsigned query_type);
+ void (*query_prepare)(struct fd_batch *batch, uint32_t num_tiles);
+ void (*query_prepare_tile)(struct fd_batch *batch, uint32_t n,
+ struct fd_ringbuffer *ring);
+ void (*query_set_stage)(struct fd_batch *batch,
+ struct fd_ringbuffer *ring, enum fd_render_stage stage);
+
/*
* Common pre-cooked VBO state (used for a3xx and later):
*/
return (1 << prim) & ctx->primtype_mask;
}
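+/* Tell the backend about a render-stage transition, so it can pause/
+ * resume any active queries; no-op for generations that don't provide
+ * the query_set_stage hook:
+ */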
+static inline void
+fd_batch_set_stage(struct fd_batch *batch,
+ struct fd_ringbuffer *ring, enum fd_render_stage stage)
+{
+ struct fd_context *ctx = batch->ctx;
+ if (ctx->query_set_stage)
+ ctx->query_set_stage(batch, ring, stage);
+}
+
void fd_context_setup_common_vbos(struct fd_context *ctx);
void fd_context_cleanup_common_vbos(struct fd_context *ctx);
/* NOTE: needs to be before resource_written(batch->query_buf), otherwise
* query_buf may not be created yet.
*/
- fd_hw_query_set_stage(batch, batch->draw, FD_STAGE_DRAW);
+ fd_batch_set_stage(batch, batch->draw, FD_STAGE_DRAW);
/*
* Figure out the buffers/features we need:
return;
}
- fd_hw_query_set_stage(batch, batch->draw, FD_STAGE_CLEAR);
+ fd_batch_set_stage(batch, batch->draw, FD_STAGE_CLEAR);
ctx->clear(ctx, buffers, color, depth, stencil);
ctx->emit_tile_renderprep(batch, tile);
- fd_hw_query_prepare_tile(batch, i, batch->gmem);
+ if (ctx->query_prepare_tile)
+ ctx->query_prepare_tile(batch, i, batch->gmem);
/* emit IB to drawcmds: */
ctx->emit_ib(batch->gmem, batch->draw);
ctx->emit_sysmem_prep(batch);
- fd_hw_query_prepare_tile(batch, 0, batch->gmem);
+ if (ctx->query_prepare_tile)
+ ctx->query_prepare_tile(batch, 0, batch->gmem);
/* emit IB to drawcmds: */
ctx->emit_ib(batch->gmem, batch->draw);
batch, pfb->width, pfb->height,
util_format_short_name(pipe_surface_format(pfb->cbufs[0])),
util_format_short_name(pipe_surface_format(pfb->zsbuf)));
- fd_hw_query_prepare(batch, 1);
+ if (ctx->query_prepare)
+ ctx->query_prepare(batch, 1);
render_sysmem(batch);
ctx->stats.batch_sysmem++;
} else {
batch, pfb->width, pfb->height, gmem->nbins_x, gmem->nbins_y,
util_format_short_name(pipe_surface_format(pfb->cbufs[0])),
util_format_short_name(pipe_surface_format(pfb->zsbuf)));
- fd_hw_query_prepare(batch, gmem->nbins_x * gmem->nbins_y);
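+ /* backend may use the tile count to set up per-tile query state: */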
+ if (ctx->query_prepare)
+ ctx->query_prepare(batch, gmem->nbins_x * gmem->nbins_y);
render_tiles(batch);
ctx->stats.batch_gmem++;
}
struct fd_query *q;
q = fd_sw_create_query(ctx, query_type);
- if (!q)
- q = fd_hw_create_query(ctx, query_type);
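+ /* otherwise fall back to the generation's hw-query implementation,
+ * if it provides one:
+ */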
+ if (!q && ctx->create_query)
+ q = ctx->create_query(ctx, query_type);
return (struct pipe_query *) q;
}
ctx->cond_query, ctx->cond_cond, ctx->cond_mode);
if (ctx->batch)
- fd_hw_query_set_stage(ctx->batch, ctx->batch->draw, stage);
+ fd_batch_set_stage(ctx->batch, ctx->batch->draw, stage);
ctx->in_blit = discard;
}
fd_blitter_pipe_end(struct fd_context *ctx)
{
if (ctx->batch)
- fd_hw_query_set_stage(ctx->batch, ctx->batch->draw, FD_STAGE_NULL);
+ fd_batch_set_stage(ctx->batch, ctx->batch->draw, FD_STAGE_NULL);
ctx->in_blit = false;
}
fd_batch_reference(&old_batch, ctx->batch);
if (likely(old_batch))
- fd_hw_query_set_stage(old_batch, old_batch->draw, FD_STAGE_NULL);
+ fd_batch_set_stage(old_batch, old_batch->draw, FD_STAGE_NULL);
batch = fd_batch_from_fb(&ctx->screen->batch_cache, ctx, framebuffer);
fd_batch_reference(&ctx->batch, NULL);