    /* close out the draw cmds by making sure any active queries are
     * paused:
     */
-   fd_batch_set_stage(batch, batch->draw, FD_STAGE_NULL);
+   fd_batch_set_stage(batch, FD_STAGE_NULL);
    fd_context_all_dirty(batch->ctx);
    batch_flush_reset_dependencies(batch, true);
    void (*query_prepare)(struct fd_batch *batch, uint32_t num_tiles);
    void (*query_prepare_tile)(struct fd_batch *batch, uint32_t n,
            struct fd_ringbuffer *ring);
-   void (*query_set_stage)(struct fd_batch *batch,
-           struct fd_ringbuffer *ring, enum fd_render_stage stage);
+   void (*query_set_stage)(struct fd_batch *batch, enum fd_render_stage stage);
    /*
     * Common pre-cooked VBO state (used for a3xx and later):
}
static inline void
-fd_batch_set_stage(struct fd_batch *batch,
-       struct fd_ringbuffer *ring, enum fd_render_stage stage)
+fd_batch_set_stage(struct fd_batch *batch, enum fd_render_stage stage)
{
    struct fd_context *ctx = batch->ctx;
        return;
    if (ctx->query_set_stage)
-       ctx->query_set_stage(batch, ring, stage);
+       ctx->query_set_stage(batch, stage);
    batch->stage = stage;
}
    /* NOTE: needs to be before resource_written(batch->query_buf), otherwise
     * query_buf may not be created yet.
     */
-   fd_batch_set_stage(batch, batch->draw, FD_STAGE_DRAW);
+   fd_batch_set_stage(batch, FD_STAGE_DRAW);
    /*
     * Figure out the buffers/features we need:
        return;
    }
-   fd_batch_set_stage(batch, batch->draw, FD_STAGE_CLEAR);
+   fd_batch_set_stage(batch, FD_STAGE_CLEAR);
    ctx->clear(ctx, buffers, color, depth, stencil);
}
void
-fd_hw_query_set_stage(struct fd_batch *batch, struct fd_ringbuffer *ring,
-       enum fd_render_stage stage)
+fd_hw_query_set_stage(struct fd_batch *batch, enum fd_render_stage stage)
{
    if (stage != batch->stage) {
        struct fd_hw_query *hq;
            bool now_active = is_active(hq, stage);
            if (now_active && !was_active)
-               resume_query(batch, hq, ring);
+               resume_query(batch, hq, batch->draw);
            else if (was_active && !now_active)
-               pause_query(batch, hq, ring);
+               pause_query(batch, hq, batch->draw);
        }
    }
    clear_sample_cache(batch);
void fd_hw_query_prepare(struct fd_batch *batch, uint32_t num_tiles);
void fd_hw_query_prepare_tile(struct fd_batch *batch, uint32_t n,
        struct fd_ringbuffer *ring);
-void fd_hw_query_set_stage(struct fd_batch *batch,
-       struct fd_ringbuffer *ring, enum fd_render_stage stage);
+void fd_hw_query_set_stage(struct fd_batch *batch, enum fd_render_stage stage);
void fd_hw_query_enable(struct fd_batch *batch, struct fd_ringbuffer *ring);
void fd_hw_query_register_provider(struct pipe_context *pctx,
        const struct fd_hw_sample_provider *provider);
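The context-struct hunk and the header hunk above have to change together, since the hw-query entry points are installed directly into those fd_context hooks. A minimal sketch of that wiring under the new signatures follows; the helper name and its placement are assumptions for illustration, and only the three hook fields come from this patch:

/* hypothetical setup helper, shown only to illustrate how the hooks
 * changed above are consumed; not part of this patch:
 */
static void
install_hw_query_hooks(struct fd_context *ctx)
{
    ctx->query_prepare      = fd_hw_query_prepare;
    ctx->query_prepare_tile = fd_hw_query_prepare_tile;
    ctx->query_set_stage    = fd_hw_query_set_stage;  /* ring arg dropped */
}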
            ctx->cond_query, ctx->cond_cond, ctx->cond_mode);
    if (ctx->batch)
-       fd_batch_set_stage(ctx->batch, ctx->batch->draw, stage);
+       fd_batch_set_stage(ctx->batch, stage);
    ctx->in_blit = discard;
}
fd_blitter_pipe_end(struct fd_context *ctx)
{
    if (ctx->batch)
-       fd_batch_set_stage(ctx->batch, ctx->batch->draw, FD_STAGE_NULL);
+       fd_batch_set_stage(ctx->batch, FD_STAGE_NULL);
    ctx->in_blit = false;
}
    fd_batch_reference(&old_batch, ctx->batch);
    if (likely(old_batch))
-       fd_batch_set_stage(old_batch, old_batch->draw, FD_STAGE_NULL);
+       fd_batch_set_stage(old_batch, FD_STAGE_NULL);
    batch = fd_batch_from_fb(&ctx->screen->batch_cache, ctx, framebuffer);
    fd_batch_reference(&ctx->batch, NULL);
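Taken together, every stage transition now implies batch->draw, so the inline helper in the context header loses its ring parameter entirely. A rough sketch of how the simplified helper reads after this patch; the early-return guard (whose bare return; appears as trimmed context above) is assumed from the surrounding header rather than shown in these hunks:

static inline void
fd_batch_set_stage(struct fd_batch *batch, enum fd_render_stage stage)
{
    struct fd_context *ctx = batch->ctx;

    /* assumed guard (only its return is visible in the hunk above):
     * ignore stage changes while an internal blit is in flight, except
     * for the final reset back to FD_STAGE_NULL:
     */
    if ((batch->stage == FD_STAGE_BLIT) && (stage != FD_STAGE_NULL))
        return;

    /* hw query providers pause/resume their counters here, emitting
     * into batch->draw themselves now that no ring is passed in:
     */
    if (ctx->query_set_stage)
        ctx->query_set_stage(batch, stage);

    batch->stage = stage;
}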