r600g: don't suspend timer queries for u_blitter
authorMarek Olšák <maraeo@gmail.com>
Thu, 23 Feb 2012 22:22:35 +0000 (23:22 +0100)
committerMarek Olšák <maraeo@gmail.com>
Mon, 5 Mar 2012 13:22:19 +0000 (14:22 +0100)
Timer queries should be able to measure the time spent in u_blitter as well.

Queries are split into two groups: the timer queries and the others
(streamout, occlusion). Only the non-timer queries are suspended for
u_blitter, and if a context flush happens while the non-timer queries
are already suspended, the flush only suspends and resumes the timer
queries.
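
For illustration, a minimal standalone sketch of the grouping idea
(hypothetical names, plain C, not the r600g code in the diff below):
queries are classified by type, and the blitter path suspends only the
non-timer group while timer queries keep running.

#include <stdbool.h>
#include <stdio.h>

enum query_type {
        QUERY_OCCLUSION,
        QUERY_STREAMOUT,
        QUERY_TIME_ELAPSED,
        QUERY_TIMESTAMP,
};

static bool is_timer_query(enum query_type type)
{
        return type == QUERY_TIME_ELAPSED || type == QUERY_TIMESTAMP;
}

struct query {
        enum query_type type;
        bool active;
};

/* Suspend every active query in one group; in the real driver this would
 * emit a "query end" packet instead of just flipping a flag. */
static void suspend_group(struct query *queries, int count, bool timer_group)
{
        for (int i = 0; i < count; i++) {
                if (queries[i].active &&
                    is_timer_query(queries[i].type) == timer_group)
                        queries[i].active = false;
        }
}

int main(void)
{
        struct query queries[] = {
                { QUERY_OCCLUSION,    true },
                { QUERY_TIME_ELAPSED, true },
        };

        /* Blitter begin: suspend only the non-timer group, so the
         * time-elapsed query keeps measuring the blit. */
        suspend_group(queries, 2, false);

        printf("occlusion active: %d, time_elapsed active: %d\n",
               queries[0].active, queries[1].active);
        return 0;
}

The actual driver keeps the two groups on separate lists and tracks the
command-stream space they need independently, as the diff below shows.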

Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
src/gallium/drivers/r600/r600_blit.c
src/gallium/drivers/r600/r600_hw_context.c
src/gallium/drivers/r600/r600_pipe.c
src/gallium/drivers/r600/r600_pipe.h
src/gallium/drivers/r600/r600_query.c

index b9471b81afd1444f9fee00dad2357ec8a0c8d843..d58abea487ffe51e804412d516d19b9cdf14d4b3 100644 (file)
@@ -45,7 +45,7 @@ static void r600_blitter_begin(struct pipe_context *ctx, enum r600_blitter_op op
 {
        struct r600_context *rctx = (struct r600_context *)ctx;
 
-       r600_suspend_queries(rctx);
+       r600_suspend_nontimer_queries(rctx);
 
        util_blitter_save_blend(rctx->blitter, rctx->states[R600_PIPE_STATE_BLEND]);
        util_blitter_save_depth_stencil_alpha(rctx->blitter, rctx->states[R600_PIPE_STATE_DSA]);
@@ -95,7 +95,7 @@ static void r600_blitter_end(struct pipe_context *ctx)
                                               rctx->saved_render_cond_mode);
                rctx->saved_render_cond = NULL;
        }
-       r600_resume_queries(rctx);
+       r600_resume_nontimer_queries(rctx);
 }
 
 static unsigned u_num_layers(struct pipe_resource *r, unsigned level)
index 9679458c98b95354f4e5dfdc757bd1ec431a812d..5a48b3d9634c33d9211022ebab88797d57793d0f 100644 (file)
@@ -814,7 +814,8 @@ void r600_need_cs_space(struct r600_context *ctx, unsigned num_dw,
        }
 
        /* Count in queries_suspend. */
-       num_dw += ctx->num_cs_dw_queries_suspend;
+       num_dw += ctx->num_cs_dw_nontimer_queries_suspend;
+       num_dw += ctx->num_cs_dw_timer_queries_suspend;
 
        /* Count in streamout_end at the end of CS. */
        num_dw += ctx->num_cs_dw_streamout_end;
@@ -1243,16 +1244,21 @@ void r600_context_flush(struct r600_context *ctx, unsigned flags)
 {
        struct radeon_winsys_cs *cs = ctx->cs;
        struct r600_block *enable_block = NULL;
-       bool queries_suspended = false;
+       bool timer_queries_suspended = false;
+       bool nontimer_queries_suspended = false;
        bool streamout_suspended = false;
 
        if (cs->cdw == ctx->atom_start_cs.atom.num_dw)
                return;
 
        /* suspend queries */
-       if (ctx->num_cs_dw_queries_suspend) {
-               r600_suspend_queries(ctx);
-               queries_suspended = true;
+       if (ctx->num_cs_dw_timer_queries_suspend) {
+               r600_suspend_timer_queries(ctx);
+               timer_queries_suspended = true;
+       }
+       if (ctx->num_cs_dw_nontimer_queries_suspend) {
+               r600_suspend_nontimer_queries(ctx);
+               nontimer_queries_suspended = true;
        }
 
        if (ctx->num_cs_dw_streamout_end) {
@@ -1284,8 +1290,11 @@ void r600_context_flush(struct r600_context *ctx, unsigned flags)
        }
 
        /* resume queries */
-       if (queries_suspended) {
-               r600_resume_queries(ctx);
+       if (timer_queries_suspended) {
+               r600_resume_timer_queries(ctx);
+       }
+       if (nontimer_queries_suspended) {
+               r600_resume_nontimer_queries(ctx);
        }
 
        /* set all valid group as dirty so they get reemited on
index 9929baa226b4cf3db7c256bc137c3abff948a784..b422d753078eaaacc14e82e9f12440dd2da29f00 100644 (file)
@@ -248,7 +248,8 @@ static struct pipe_context *r600_create_context(struct pipe_screen *screen, void
        rctx->chip_class = rscreen->chip_class;
 
        LIST_INITHEAD(&rctx->dirty_states);
-       LIST_INITHEAD(&rctx->active_query_list);
+       LIST_INITHEAD(&rctx->active_timer_queries);
+       LIST_INITHEAD(&rctx->active_nontimer_queries);
        LIST_INITHEAD(&rctx->dirty);
        LIST_INITHEAD(&rctx->resource_dirty);
        LIST_INITHEAD(&rctx->enable_list);
index bc0ffc038522e087390bb9d683a372086b2abd3f..62d831086fce3491385953a65ca66de220e8d957 100644 (file)
@@ -309,8 +309,18 @@ struct r600_context {
 
        /* The list of active queries. Only one query of each type can be active. */
        int                     num_occlusion_queries;
-       struct list_head        active_query_list;
-       unsigned                num_cs_dw_queries_suspend;
+
+       /* Manage queries in two separate groups:
+        * The timer ones and the others (streamout, occlusion).
+        *
+        * We do this because we should only suspend non-timer queries for u_blitter,
+        * and later if the non-timer queries are suspended, the context flush should
+        * only suspend and resume the timer queries. */
+       struct list_head        active_timer_queries;
+       unsigned                num_cs_dw_timer_queries_suspend;
+       struct list_head        active_nontimer_queries;
+       unsigned                num_cs_dw_nontimer_queries_suspend;
+
        unsigned                num_cs_dw_streamout_end;
 
        unsigned                backend_mask;
@@ -395,8 +405,10 @@ void r600_flush(struct pipe_context *ctx, struct pipe_fence_handle **fence,
 
 /* r600_query.c */
 void r600_init_query_functions(struct r600_context *rctx);
-void r600_suspend_queries(struct r600_context *ctx);
-void r600_resume_queries(struct r600_context *ctx);
+void r600_suspend_nontimer_queries(struct r600_context *ctx);
+void r600_resume_nontimer_queries(struct r600_context *ctx);
+void r600_suspend_timer_queries(struct r600_context *ctx);
+void r600_resume_timer_queries(struct r600_context *ctx);
 
 /* r600_resource.c */
 void r600_init_context_resource_functions(struct r600_context *r600);
index 25731c290f69960c175790199a37ec55c2cd6774..53440ae734a5f7588a64d06e3c710664f3928b94 100644 (file)
 #include "util/u_memory.h"
 #include "r600_hw_context_priv.h"
 
+static bool r600_is_timer_query(unsigned type)
+{
+       return type == PIPE_QUERY_TIME_ELAPSED ||
+              type == PIPE_QUERY_TIMESTAMP ||
+              type == PIPE_QUERY_TIMESTAMP_DISJOINT;
+}
+
 static struct r600_resource *r600_new_query_buffer(struct r600_context *ctx, unsigned type)
 {
        unsigned j, i, num_results, buf_size = 4096;
@@ -123,7 +130,11 @@ static void r600_emit_query_begin(struct r600_context *ctx, struct r600_query *q
        cs->buf[cs->cdw++] = PKT3(PKT3_NOP, 0, 0);
        cs->buf[cs->cdw++] = r600_context_bo_reloc(ctx, query->buffer.buf, RADEON_USAGE_WRITE);
 
-       ctx->num_cs_dw_queries_suspend += query->num_cs_dw;
+       if (r600_is_timer_query(query->type)) {
+               ctx->num_cs_dw_timer_queries_suspend += query->num_cs_dw;
+       } else {
+               ctx->num_cs_dw_nontimer_queries_suspend += query->num_cs_dw;
+       }
 }
 
 static void r600_emit_query_end(struct r600_context *ctx, struct r600_query *query)
@@ -167,7 +178,12 @@ static void r600_emit_query_end(struct r600_context *ctx, struct r600_query *que
        cs->buf[cs->cdw++] = r600_context_bo_reloc(ctx, query->buffer.buf, RADEON_USAGE_WRITE);
 
        query->buffer.results_end += query->result_size;
-       ctx->num_cs_dw_queries_suspend -= query->num_cs_dw;
+
+       if (r600_is_timer_query(query->type)) {
+               ctx->num_cs_dw_timer_queries_suspend -= query->num_cs_dw;
+       } else {
+               ctx->num_cs_dw_nontimer_queries_suspend -= query->num_cs_dw;
+       }
 }
 
 static void r600_emit_query_predication(struct r600_context *ctx, struct r600_query *query,
@@ -324,7 +340,12 @@ static void r600_begin_query(struct pipe_context *ctx, struct pipe_query *query)
        r600_update_occlusion_query_state(rctx, rquery->type, 1);
 
        r600_emit_query_begin(rctx, rquery);
-       LIST_ADDTAIL(&rquery->list, &rctx->active_query_list);
+
+       if (r600_is_timer_query(rquery->type)) {
+               LIST_ADDTAIL(&rquery->list, &rctx->active_timer_queries);
+       } else {
+               LIST_ADDTAIL(&rquery->list, &rctx->active_nontimer_queries);
+       }
 }
 
 static void r600_end_query(struct pipe_context *ctx, struct pipe_query *query)
@@ -525,23 +546,45 @@ static void r600_render_condition(struct pipe_context *ctx,
        }
 }
 
-void r600_suspend_queries(struct r600_context *ctx)
+void r600_suspend_nontimer_queries(struct r600_context *ctx)
 {
        struct r600_query *query;
 
-       LIST_FOR_EACH_ENTRY(query, &ctx->active_query_list, list) {
+       LIST_FOR_EACH_ENTRY(query, &ctx->active_nontimer_queries, list) {
                r600_emit_query_end(ctx, query);
        }
-       assert(ctx->num_cs_dw_queries_suspend == 0);
+       assert(ctx->num_cs_dw_nontimer_queries_suspend == 0);
+}
+
+void r600_resume_nontimer_queries(struct r600_context *ctx)
+{
+       struct r600_query *query;
+
+       assert(ctx->num_cs_dw_nontimer_queries_suspend == 0);
+
+       LIST_FOR_EACH_ENTRY(query, &ctx->active_nontimer_queries, list) {
+               r600_emit_query_begin(ctx, query);
+       }
+}
+
+void r600_suspend_timer_queries(struct r600_context *ctx)
+{
+       struct r600_query *query;
+
+       LIST_FOR_EACH_ENTRY(query, &ctx->active_timer_queries, list) {
+               r600_emit_query_end(ctx, query);
+       }
+
+       assert(ctx->num_cs_dw_timer_queries_suspend == 0);
 }
 
-void r600_resume_queries(struct r600_context *ctx)
+void r600_resume_timer_queries(struct r600_context *ctx)
 {
        struct r600_query *query;
 
-       assert(ctx->num_cs_dw_queries_suspend == 0);
+       assert(ctx->num_cs_dw_timer_queries_suspend == 0);
 
-       LIST_FOR_EACH_ENTRY(query, &ctx->active_query_list, list) {
+       LIST_FOR_EACH_ENTRY(query, &ctx->active_timer_queries, list) {
                r600_emit_query_begin(ctx, query);
        }
 }