i = 0;
for (group = query->groups; group; group = group->next) {
struct r600_perfcounter_block *block = group->block;
- unsigned select_dw, read_dw;
+ unsigned read_dw;
unsigned instances = 1;
if ((block->flags & R600_PC_BLOCK_SE) && group->se < 0)
void (*set_occlusion_query_state)(struct pipe_context *ctx,
bool old_enable,
bool old_perfect_enable);
-
- void (*save_qbo_state)(struct pipe_context *ctx, struct r600_qbo_state *st);
};
/* r600_buffer_common.c */
return;
}
- rctx->save_qbo_state(&rctx->b, &saved_state);
+ si_save_qbo_state(&rctx->b, &saved_state);
r600_get_hw_query_params(rctx, query, index >= 0 ? index : 0, &params);
consts.end_offset = params.end_offset - params.start_offset;
si_mark_atom_dirty(sctx, &sctx->msaa_config);
}
-static void si_save_qbo_state(struct pipe_context *ctx, struct r600_qbo_state *st)
+void si_save_qbo_state(struct pipe_context *ctx, struct r600_qbo_state *st)
{
struct si_context *sctx = (struct si_context*)ctx;
sctx->b.b.set_active_query_state = si_set_active_query_state;
sctx->b.set_occlusion_query_state = si_set_occlusion_query_state;
- sctx->b.save_qbo_state = si_save_qbo_state;
sctx->b.b.draw_vbo = si_draw_vbo;
unsigned force_level);
void si_update_fb_dirtiness_after_rendering(struct si_context *sctx);
void si_update_ps_iter_samples(struct si_context *sctx);
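+/* Save into *st the context state that query-result processing will
+ * clobber, so callers can restore it afterwards (brief descriptive
+ * comment; the exact fields saved are those of struct r600_qbo_state). */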
+void si_save_qbo_state(struct pipe_context *ctx, struct r600_qbo_state *st);
/* si_state_binning.c */
void si_emit_dpbb_state(struct si_context *sctx, struct r600_atom *state);