Add an fd_context_batch() accessor, for the cases in which (after the
following commit) ctx->batch may be null.

Prep work for the following commit.
Signed-off-by: Rob Clark <robdclark@gmail.com>
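
A minimal standalone sketch of the intended calling pattern (the struct
definitions below are illustrative stand-ins, not the driver's real
fd_context/fd_batch types; the helper itself is just the trivial
pass-through added by this patch):

    #include <stdio.h>

    /* stand-in types, for illustration only: */
    struct fd_batch   { unsigned seqno; };
    struct fd_context { struct fd_batch *batch; };

    static inline struct fd_batch *
    fd_context_batch(struct fd_context *ctx)
    {
        return ctx->batch;   /* may become NULL after the following commit */
    }

    int main(void)
    {
        struct fd_context ctx = { .batch = NULL };
        struct fd_batch *batch = fd_context_batch(&ctx);

        if (!batch) {
            /* with no current batch, callers simply bail out: */
            printf("no current batch, nothing to do\n");
            return 0;
        }

        printf("current batch seqno=%u\n", batch->seqno);
        return 0;
    }

For now the helper just returns ctx->batch; the point is to give callers a
single hook once a NULL batch becomes possible.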
* just hard coded. If we start exposing more countables than we
* have counters, we will need to be more clever.
*/
- fd_wfi(ctx->batch, ring);
+ struct fd_batch *batch = fd_context_batch(ctx);
+ fd_wfi(batch, ring);
OUT_PKT0(ring, REG_A4XX_CP_PERFCTR_CP_SEL_0, 1);
OUT_RING(ring, CP_ALWAYS_COUNT);
}
}
if (deferred) {
- struct fd_batch *current_batch = ctx->batch;
+ struct fd_batch *current_batch = fd_context_batch(ctx);
for (unsigned i = 0; i < n; i++) {
- if (batches[i] != current_batch) {
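+ /* skip NULL batches, other contexts' batches, and the current batch: */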
+ if (batches[i] && (batches[i]->ctx == ctx) &&
+ (batches[i] != current_batch)) {
fd_batch_add_dep(current_batch, batches[i]);
}
}
DBG("%p: flush: flags=%x\n", ctx->batch, flags);
+ if (!ctx->batch)
+ return;
+
/* Take a ref to the batch's fence (batch can be unref'd when flushed): */
fd_fence_ref(pctx->screen, &fence, ctx->batch->fence);
return (1 << prim) & ctx->primtype_mask;
}
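+/* Accessor for the context's current batch; after the following commit
+ * ctx->batch may be NULL, so callers should use this helper rather than
+ * dereferencing ctx->batch directly.
+ */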
+static inline struct fd_batch *
+fd_context_batch(struct fd_context *ctx)
+{
+ return ctx->batch;
+}
+
static inline void
fd_batch_set_stage(struct fd_batch *batch, enum fd_render_stage stage)
{
fd_draw_vbo(struct pipe_context *pctx, const struct pipe_draw_info *info)
{
struct fd_context *ctx = fd_context(pctx);
- struct fd_batch *batch = ctx->batch;
+ struct fd_batch *batch = fd_context_batch(ctx);
struct pipe_framebuffer_state *pfb = &batch->framebuffer;
struct pipe_scissor_state *scissor = fd_context_get_scissor(ctx);
unsigned i, prims, buffers = 0, restore_buffers = 0;
const union pipe_color_union *color, double depth, unsigned stencil)
{
struct fd_context *ctx = fd_context(pctx);
- struct fd_batch *batch = ctx->batch;
+ struct fd_batch *batch = fd_context_batch(ctx);
struct pipe_framebuffer_state *pfb = &batch->framebuffer;
struct pipe_scissor_state *scissor = fd_context_get_scissor(ctx);
unsigned cleared_buffers;
struct pipe_fence_handle *fence)
{
struct fd_context *ctx = fd_context(pctx);
- struct fd_batch *batch = ctx->batch;
+ struct fd_batch *batch = fd_context_batch(ctx);
fence_flush(fence);
static boolean
fd_acc_begin_query(struct fd_context *ctx, struct fd_query *q)
{
- struct fd_batch *batch = ctx->batch;
+ struct fd_batch *batch = fd_context_batch(ctx);
struct fd_acc_query *aq = fd_acc_query(q);
const struct fd_acc_sample_provider *p = aq->provider;
static void
fd_acc_end_query(struct fd_context *ctx, struct fd_query *q)
{
- struct fd_batch *batch = ctx->batch;
+ struct fd_batch *batch = fd_context_batch(ctx);
struct fd_acc_query *aq = fd_acc_query(q);
const struct fd_acc_sample_provider *p = aq->provider;
static boolean
fd_hw_begin_query(struct fd_context *ctx, struct fd_query *q)
{
- struct fd_batch *batch = ctx->batch;
+ struct fd_batch *batch = fd_context_batch(ctx);
struct fd_hw_query *hq = fd_hw_query(q);
DBG("%p: active=%d", q, q->active);
static void
fd_hw_end_query(struct fd_context *ctx, struct fd_query *q)
{
- struct fd_batch *batch = ctx->batch;
+ struct fd_batch *batch = fd_context_batch(ctx);
struct fd_hw_query *hq = fd_hw_query(q);
DBG("%p: active=%d", q, q->active);