}
static void
-fd2_emit_tile_gmem2mem(struct fd_batch *batch, struct fd_tile *tile)
+fd2_emit_tile_gmem2mem(struct fd_batch *batch, const struct fd_tile *tile)
{
fd2_emit_ib(batch->gmem, batch->tile_fini);
}
}
static void
-fd2_emit_tile_mem2gmem(struct fd_batch *batch, struct fd_tile *tile)
+fd2_emit_tile_mem2gmem(struct fd_batch *batch, const struct fd_tile *tile)
{
struct fd_context *ctx = batch->ctx;
struct fd2_context *fd2_ctx = fd2_context(ctx);
/* before mem2gmem */
static void
-fd2_emit_tile_prep(struct fd_batch *batch, struct fd_tile *tile)
+fd2_emit_tile_prep(struct fd_batch *batch, const struct fd_tile *tile)
{
struct fd_ringbuffer *ring = batch->gmem;
struct pipe_framebuffer_state *pfb = &batch->framebuffer;
/* before IB to rendering cmds: */
static void
-fd2_emit_tile_renderprep(struct fd_batch *batch, struct fd_tile *tile)
+fd2_emit_tile_renderprep(struct fd_batch *batch, const struct fd_tile *tile)
{
struct fd_context *ctx = batch->ctx;
struct fd2_context *fd2_ctx = fd2_context(ctx);
}
static void
-fd3_emit_tile_gmem2mem(struct fd_batch *batch, struct fd_tile *tile)
+fd3_emit_tile_gmem2mem(struct fd_batch *batch, const struct fd_tile *tile)
{
struct fd_context *ctx = batch->ctx;
struct fd_ringbuffer *ring = batch->gmem;
}
static void
-fd3_emit_tile_mem2gmem(struct fd_batch *batch, struct fd_tile *tile)
+fd3_emit_tile_mem2gmem(struct fd_batch *batch, const struct fd_tile *tile)
{
struct fd_context *ctx = batch->ctx;
struct fd_gmem_stateobj *gmem = &ctx->gmem;
/* before mem2gmem */
static void
-fd3_emit_tile_prep(struct fd_batch *batch, struct fd_tile *tile)
+fd3_emit_tile_prep(struct fd_batch *batch, const struct fd_tile *tile)
{
struct fd_ringbuffer *ring = batch->gmem;
struct pipe_framebuffer_state *pfb = &batch->framebuffer;
/* before IB to rendering cmds: */
static void
-fd3_emit_tile_renderprep(struct fd_batch *batch, struct fd_tile *tile)
+fd3_emit_tile_renderprep(struct fd_batch *batch, const struct fd_tile *tile)
{
struct fd_context *ctx = batch->ctx;
struct fd3_context *fd3_ctx = fd3_context(ctx);
}
static void
-fd4_emit_tile_gmem2mem(struct fd_batch *batch, struct fd_tile *tile)
+fd4_emit_tile_gmem2mem(struct fd_batch *batch, const struct fd_tile *tile)
{
struct fd_context *ctx = batch->ctx;
struct fd_gmem_stateobj *gmem = &ctx->gmem;
}
static void
-fd4_emit_tile_mem2gmem(struct fd_batch *batch, struct fd_tile *tile)
+fd4_emit_tile_mem2gmem(struct fd_batch *batch, const struct fd_tile *tile)
{
struct fd_context *ctx = batch->ctx;
struct fd_gmem_stateobj *gmem = &ctx->gmem;
/* before mem2gmem */
static void
-fd4_emit_tile_prep(struct fd_batch *batch, struct fd_tile *tile)
+fd4_emit_tile_prep(struct fd_batch *batch, const struct fd_tile *tile)
{
struct fd_context *ctx = batch->ctx;
struct fd_ringbuffer *ring = batch->gmem;
/* before IB to rendering cmds: */
static void
-fd4_emit_tile_renderprep(struct fd_batch *batch, struct fd_tile *tile)
+fd4_emit_tile_renderprep(struct fd_batch *batch, const struct fd_tile *tile)
{
struct fd_context *ctx = batch->ctx;
struct fd4_context *fd4_ctx = fd4_context(ctx);
/* before mem2gmem */
static void
-fd5_emit_tile_prep(struct fd_batch *batch, struct fd_tile *tile)
+fd5_emit_tile_prep(struct fd_batch *batch, const struct fd_tile *tile)
{
struct fd_context *ctx = batch->ctx;
struct fd_gmem_stateobj *gmem = &ctx->gmem;
}
static void
-fd5_emit_tile_mem2gmem(struct fd_batch *batch, struct fd_tile *tile)
+fd5_emit_tile_mem2gmem(struct fd_batch *batch, const struct fd_tile *tile)
{
struct fd_ringbuffer *ring = batch->gmem;
struct fd_context *ctx = batch->ctx;
/* before IB to rendering cmds: */
static void
-fd5_emit_tile_renderprep(struct fd_batch *batch, struct fd_tile *tile)
+fd5_emit_tile_renderprep(struct fd_batch *batch, const struct fd_tile *tile)
{
struct fd_ringbuffer *ring = batch->gmem;
struct fd_gmem_stateobj *gmem = &batch->ctx->gmem;
}
static void
-fd5_emit_tile_gmem2mem(struct fd_batch *batch, struct fd_tile *tile)
+fd5_emit_tile_gmem2mem(struct fd_batch *batch, const struct fd_tile *tile)
{
struct fd_context *ctx = batch->ctx;
struct fd_gmem_stateobj *gmem = &ctx->gmem;
* is skipped for tiles that have no visible geometry.
*/
static void
-emit_conditional_ib(struct fd_batch *batch, struct fd_tile *tile,
+emit_conditional_ib(struct fd_batch *batch, const struct fd_tile *tile,
struct fd_ringbuffer *target)
{
struct fd_ringbuffer *ring = batch->gmem;
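For reference, a hedged sketch of how this helper is meant to be used: with hardware binning enabled, the per-tile draw IB is routed through emit_conditional_ib() so the GPU can skip bins that have no visible geometry; without binning it is emitted unconditionally. The caller below is illustrative only, not part of the patch:

/* illustrative caller, assuming an a6xx-style draw path: */
static void
example_emit_tile(struct fd_batch *batch, const struct fd_tile *tile)
{
        if (!use_hw_binning(batch)) {
                fd6_emit_ib(batch->gmem, batch->draw);
        } else {
                /* skipped on the GPU when the bin has no visible geometry: */
                emit_conditional_ib(batch, tile, batch->draw);
        }
}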
/* before mem2gmem */
static void
-fd6_emit_tile_prep(struct fd_batch *batch, struct fd_tile *tile)
+fd6_emit_tile_prep(struct fd_batch *batch, const struct fd_tile *tile)
{
struct fd_context *ctx = batch->ctx;
struct fd_gmem_stateobj *gmem = &ctx->gmem;
* transfer from system memory to gmem
*/
static void
-fd6_emit_tile_mem2gmem(struct fd_batch *batch, struct fd_tile *tile)
+fd6_emit_tile_mem2gmem(struct fd_batch *batch, const struct fd_tile *tile)
{
}
/* before IB to rendering cmds: */
static void
-fd6_emit_tile_renderprep(struct fd_batch *batch, struct fd_tile *tile)
+fd6_emit_tile_renderprep(struct fd_batch *batch, const struct fd_tile *tile)
{
if (batch->fast_cleared || !use_hw_binning(batch)) {
fd6_emit_ib(batch->gmem, batch->tile_setup);
}
static void
-fd6_emit_tile(struct fd_batch *batch, struct fd_tile *tile)
+fd6_emit_tile(struct fd_batch *batch, const struct fd_tile *tile)
{
if (!use_hw_binning(batch)) {
fd6_emit_ib(batch->gmem, batch->draw);
}
static void
-fd6_emit_tile_gmem2mem(struct fd_batch *batch, struct fd_tile *tile)
+fd6_emit_tile_gmem2mem(struct fd_batch *batch, const struct fd_tile *tile)
{
struct fd_ringbuffer *ring = batch->gmem;
/* GMEM/tile handling fxns: */
void (*emit_tile_init)(struct fd_batch *batch);
- void (*emit_tile_prep)(struct fd_batch *batch, struct fd_tile *tile);
- void (*emit_tile_mem2gmem)(struct fd_batch *batch, struct fd_tile *tile);
- void (*emit_tile_renderprep)(struct fd_batch *batch, struct fd_tile *tile);
- void (*emit_tile)(struct fd_batch *batch, struct fd_tile *tile);
- void (*emit_tile_gmem2mem)(struct fd_batch *batch, struct fd_tile *tile);
+ void (*emit_tile_prep)(struct fd_batch *batch, const struct fd_tile *tile);
+ void (*emit_tile_mem2gmem)(struct fd_batch *batch, const struct fd_tile *tile);
+ void (*emit_tile_renderprep)(struct fd_batch *batch, const struct fd_tile *tile);
+ void (*emit_tile)(struct fd_batch *batch, const struct fd_tile *tile);
+ void (*emit_tile_gmem2mem)(struct fd_batch *batch, const struct fd_tile *tile);
void (*emit_tile_fini)(struct fd_batch *batch); /* optional */
/* optional, for GMEM bypass: */
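Each generation's gmem code installs its implementations of these hooks on struct fd_context when the context is set up. A minimal sketch of that wiring, in the style of the per-generation gmem init helpers (e.g. fd5_gmem_init()); the fdN names below are placeholders, not real symbols:

/* sketch only: per-generation registration of the const-taking tile hooks */
void
fdN_gmem_init(struct pipe_context *pctx)
{
        struct fd_context *ctx = fd_context(pctx);

        ctx->emit_tile_init       = fdN_emit_tile_init;
        ctx->emit_tile_prep       = fdN_emit_tile_prep;
        ctx->emit_tile_mem2gmem   = fdN_emit_tile_mem2gmem;
        ctx->emit_tile_renderprep = fdN_emit_tile_renderprep;
        ctx->emit_tile_gmem2mem   = fdN_emit_tile_gmem2mem;
        ctx->emit_tile_fini       = fdN_emit_tile_fini;
}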
* case would be a single clear.
*/
bool
-fd_gmem_needs_restore(struct fd_batch *batch, struct fd_tile *tile,
+fd_gmem_needs_restore(struct fd_batch *batch, const struct fd_tile *tile,
uint32_t buffers)
{
if (!(batch->restore & buffers))
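Generation backends call this from their mem2gmem hooks to decide which attachments actually need to be pulled back into GMEM before rendering the tile. A hedged usage sketch; emit_restore_blit() is a hypothetical stand-in for the per-generation blit helper:

/* sketch: typical checks inside a mem2gmem hook */
if (fd_gmem_needs_restore(batch, tile, FD_BUFFER_COLOR))
        emit_restore_blit(batch, tile, pfb->cbufs[0]);   /* hypothetical helper */
if (fd_gmem_needs_restore(batch, tile, FD_BUFFER_DEPTH | FD_BUFFER_STENCIL))
        emit_restore_blit(batch, tile, pfb->zsbuf);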
void fd_gmem_render_tiles(struct fd_batch *batch);
-bool fd_gmem_needs_restore(struct fd_batch *batch, struct fd_tile *tile,
+bool fd_gmem_needs_restore(struct fd_batch *batch, const struct fd_tile *tile,
uint32_t buffers);
#endif /* FREEDRENO_GMEM_H_ */
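For context, fd_gmem_render_tiles() is the shared code that walks the bins and drives the hooks above, which is why every tile pointer it hands out can now be const. A simplified, hedged sketch of the per-tile loop (stats, query and WFI handling omitted; names approximate the common code):

/* approximate shape of the per-tile loop in the common gmem code */
ctx->emit_tile_init(batch);
for (unsigned i = 0; i < (gmem->nbin_w * gmem->nbin_h); i++) {
        const struct fd_tile *tile = &ctx->tile[i];

        ctx->emit_tile_prep(batch, tile);
        if (batch->restore)
                ctx->emit_tile_mem2gmem(batch, tile);
        ctx->emit_tile_renderprep(batch, tile);

        /* a6xx provides emit_tile(); on older gens the common code emits
         * the IB to the draw cmds directly at this point:
         */
        if (ctx->emit_tile)
                ctx->emit_tile(batch, tile);

        ctx->emit_tile_gmem2mem(batch, tile);
}
if (ctx->emit_tile_fini)
        ctx->emit_tile_fini(batch);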