r600_update_occlusion_query_state(ctx, query->b.type, 1);
si_update_prims_generated_query_state((void*)ctx, query->b.type, 1);
- si_need_cs_space((struct si_context*)ctx);
+ si_need_gfx_cs_space((struct si_context*)ctx);
/* Get a new query buffer if needed. */
if (query->buffer.results_end + query->result_size > query->buffer.buf->b.b.width0) {
/* The queries which need begin already called this in begin_query. */
if (query->flags & R600_QUERY_HW_FLAG_NO_START)
- si_need_cs_space((struct si_context*)ctx);
+ si_need_gfx_cs_space((struct si_context*)ctx);
/* emit end query */
va = query->buffer.buf->gpu_address + query->buffer.results_end;
assert(ctx->num_cs_dw_queries_suspend == 0);
/* Check CS space here. Resuming must not be interrupted by flushes. */
- si_need_cs_space((struct si_context*)ctx);
+ si_need_gfx_cs_space((struct si_context*)ctx);
LIST_FOR_EACH_ENTRY(query, &ctx->active_queries, list) {
r600_query_hw_emit_start(ctx, query);
}
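The placement of that check matters: space is reserved once, before the loop, because if si_need_gfx_cs_space flushed between two r600_query_hw_emit_start calls, the start events already written for earlier queries would be submitted with the old CS and the resume sequence would be split across command streams, which is exactly what the comment above the check rules out.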
if (!(user_flags & SI_CPDMA_SKIP_CHECK_CS_SPACE))
- si_need_cs_space(sctx);
+ si_need_gfx_cs_space(sctx);
- /* This must be done after need_cs_space. */
+ /* This must be done after need_gfx_cs_space. */
if (!(user_flags & SI_CPDMA_SKIP_BO_LIST_UPDATE)) {
void si_flush_gfx_cs(void *context, unsigned flags,
struct pipe_fence_handle **fence);
void si_begin_new_cs(struct si_context *ctx);
-void si_need_cs_space(struct si_context *ctx);
+void si_need_gfx_cs_space(struct si_context *ctx);
/* si_compute.c */
void si_init_compute_functions(struct si_context *sctx);
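For context, the renamed helper's contract is unchanged: guarantee that enough command-stream space remains before the caller starts emitting packets, flushing the gfx CS if it does not. A minimal sketch under that contract follows; the ws->cs_check_space hook, the ctx->ws/ctx->gfx_cs field names, the 2048-dword threshold, and the PIPE_FLUSH_ASYNC flag are assumptions for illustration, not taken from this patch (only si_flush_gfx_cs is declared in the hunk above):

    void si_need_gfx_cs_space(struct si_context *ctx)
    {
            /* Assumed fields: ctx->ws (the winsys) and ctx->gfx_cs (the gfx
             * command stream); the dword threshold and flush flag are
             * illustrative. Flush and start a new CS when the remaining
             * space cannot hold what the caller is about to emit. */
            if (!ctx->ws->cs_check_space(ctx->gfx_cs, 2048))
                    si_flush_gfx_cs(ctx, PIPE_FLUSH_ASYNC, NULL);
    }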
}
}
- si_need_cs_space(sctx);
+ si_need_gfx_cs_space(sctx);
/* Since we've called r600_context_add_resource_size for vertex buffers,
- * this must be called after si_need_cs_space, because we must let
+ * this must be called after si_need_gfx_cs_space, because we must let