From: Marek Olšák
Date: Tue, 21 Jan 2014 17:01:01 +0000 (+0100)
Subject: r600g: move queries to drivers/radeon
X-Git-Url: https://git.libre-soc.org/?a=commitdiff_plain;h=4e5c70e066b0fa28a6e40791963604286929a412;p=mesa.git

r600g: move queries to drivers/radeon

Reviewed-by: Michel Dänzer
Reviewed-by: Tom Stellard
---
diff --git a/src/gallium/drivers/r600/Makefile.sources b/src/gallium/drivers/r600/Makefile.sources
index d96d98bc0a1..f04e156d086 100644
--- a/src/gallium/drivers/r600/Makefile.sources
+++ b/src/gallium/drivers/r600/Makefile.sources
@@ -4,7 +4,6 @@ C_SOURCES = \
 	r600_hw_context.c \
 	r600_isa.c \
 	r600_pipe.c \
-	r600_query.c \
 	r600_resource.c \
 	r600_shader.c \
 	r600_state.c \
diff --git a/src/gallium/drivers/r600/r600_blit.c b/src/gallium/drivers/r600/r600_blit.c
index de8ff4fcf7f..c2ae2f6b28e 100644
--- a/src/gallium/drivers/r600/r600_blit.c
+++ b/src/gallium/drivers/r600/r600_blit.c
@@ -54,7 +54,7 @@ static void r600_blitter_begin(struct pipe_context *ctx, enum r600_blitter_op op
 {
 	struct r600_context *rctx = (struct r600_context *)ctx;
 
-	r600_suspend_nontimer_queries(rctx);
+	r600_suspend_nontimer_queries(&rctx->b);
 
 	util_blitter_save_vertex_buffer_slot(rctx->blitter, rctx->vertex_buffer_state.vb);
 	util_blitter_save_vertex_elements(rctx->blitter, rctx->vertex_fetch_shader.cso);
@@ -86,18 +86,18 @@ static void r600_blitter_begin(struct pipe_context *ctx, enum r600_blitter_op op
 			(struct pipe_sampler_view**)rctx->samplers[PIPE_SHADER_FRAGMENT].views.views);
 	}
 
-	if ((op & R600_DISABLE_RENDER_COND) && rctx->current_render_cond) {
+	if ((op & R600_DISABLE_RENDER_COND) && rctx->b.current_render_cond) {
 		util_blitter_save_render_condition(rctx->blitter,
-						   rctx->current_render_cond,
-						   rctx->current_render_cond_cond,
-						   rctx->current_render_cond_mode);
+						   rctx->b.current_render_cond,
+						   rctx->b.current_render_cond_cond,
+						   rctx->b.current_render_cond_mode);
 	}
 }
 
 static void r600_blitter_end(struct pipe_context *ctx)
 {
 	struct r600_context *rctx = (struct r600_context *)ctx;
-	r600_resume_nontimer_queries(rctx);
+	r600_resume_nontimer_queries(&rctx->b);
 }
 
 static unsigned u_max_sample(struct pipe_resource *r)
diff --git a/src/gallium/drivers/r600/r600_hw_context.c b/src/gallium/drivers/r600/r600_hw_context.c
index 7afd4b0099f..d6b0280affa 100644
--- a/src/gallium/drivers/r600/r600_hw_context.c
+++ b/src/gallium/drivers/r600/r600_hw_context.c
@@ -29,94 +29,11 @@
 #include
 #include
 
-/* Get backends mask */
-void r600_get_backend_mask(struct r600_context *ctx)
-{
-	struct radeon_winsys_cs *cs = ctx->b.rings.gfx.cs;
-	struct r600_resource *buffer;
-	uint32_t *results;
-	unsigned num_backends = ctx->screen->b.info.r600_num_backends;
-	unsigned i, mask = 0;
-	uint64_t va;
-
-	/* if backend_map query is supported by the kernel */
-	if (ctx->screen->b.info.r600_backend_map_valid) {
-		unsigned num_tile_pipes = ctx->screen->b.info.r600_num_tile_pipes;
-		unsigned backend_map = ctx->screen->b.info.r600_backend_map;
-		unsigned item_width, item_mask;
-
-		if (ctx->b.chip_class >= EVERGREEN) {
-			item_width = 4;
-			item_mask = 0x7;
-		} else {
-			item_width = 2;
-			item_mask = 0x3;
-		}
-
-		while(num_tile_pipes--) {
-			i = backend_map & item_mask;
-			mask |= (1<<i);
-			backend_map >>= item_width;
-		}
-		if (mask != 0) {
-			ctx->backend_mask = mask;
-			return;
-		}
-	}
-
-	/* otherwise backup path for older kernels */
-
-	/* create buffer for event data */
-	buffer = (struct r600_resource*)
-		pipe_buffer_create(&ctx->screen->b.b, PIPE_BIND_CUSTOM,
-				   PIPE_USAGE_STAGING, ctx->max_db*16);
-	if (!buffer)
-		goto err;
-	va = r600_resource_va(&ctx->screen->b.b, (void*)buffer);
-
-	/* initialize buffer with zeroes */
-	results = r600_buffer_map_sync_with_rings(&ctx->b, buffer, PIPE_TRANSFER_WRITE);
-	if (results) {
-		memset(results, 0, ctx->max_db * 4 * 4);
-		ctx->b.ws->buffer_unmap(buffer->cs_buf);
-
-		/* emit EVENT_WRITE for ZPASS_DONE */
-		cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE, 2, 0);
-		cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1);
-		cs->buf[cs->cdw++] = va;
-		cs->buf[cs->cdw++] = (va >> 32UL) & 0xFF;
-
-		cs->buf[cs->cdw++] = PKT3(PKT3_NOP, 0, 0);
-		cs->buf[cs->cdw++] = r600_context_bo_reloc(&ctx->b, &ctx->b.rings.gfx, buffer, RADEON_USAGE_WRITE);
-
-		/* analyze results */
-		results = r600_buffer_map_sync_with_rings(&ctx->b, buffer, PIPE_TRANSFER_READ);
-		if (results) {
-			for(i = 0; i < ctx->max_db; i++) {
-				/* at least highest bit will be set if backend is used */
-				if (results[i*4 + 1])
-					mask |= (1<<i);
-			}
-			ctx->b.ws->buffer_unmap(buffer->cs_buf);
-		}
-	}
-
-	pipe_resource_reference((struct pipe_resource**)&buffer, NULL);
-
-	if (mask != 0) {
-		ctx->backend_mask = mask;
-		return;
-	}
-
-err:
-	/* fallback to old method - set num_backends lower bits to 1 */
-	ctx->backend_mask = (~((uint32_t)0))>>(32-num_backends);
-	return;
-}
 
 void r600_need_cs_space(struct r600_context *ctx, unsigned num_dw,
 			boolean count_draw_in)
 {
+
 	if (!ctx->b.ws->cs_memory_below_limit(ctx->b.rings.gfx.cs, ctx->b.vram, ctx->b.gtt)) {
 		ctx->b.gtt = 0;
 		ctx->b.vram = 0;
@@ -151,7 +68,7 @@ void r600_need_cs_space(struct r600_context *ctx, unsigned num_dw,
 	}
 
 	/* Count in queries_suspend. */
-	num_dw += ctx->num_cs_dw_nontimer_queries_suspend;
+	num_dw += ctx->b.num_cs_dw_nontimer_queries_suspend;
 
 	/* Count in streamout_end at the end of CS. */
 	if (ctx->b.streamout.begin_emitted) {
@@ -159,7 +76,7 @@ void r600_need_cs_space(struct r600_context *ctx, unsigned num_dw,
 	}
 
 	/* Count in render_condition(NULL) at the end of CS. */
-	if (ctx->predicate_drawing) {
+	if (ctx->b.predicate_drawing) {
 		num_dw += 3;
 	}
 
@@ -317,13 +234,13 @@ void r600_context_flush(struct r600_context *ctx, unsigned flags)
 {
 	struct radeon_winsys_cs *cs = ctx->b.rings.gfx.cs;
 
-	ctx->nontimer_queries_suspended = false;
+	ctx->b.nontimer_queries_suspended = false;
 	ctx->b.streamout.suspended = false;
 
 	/* suspend queries */
-	if (ctx->num_cs_dw_nontimer_queries_suspend) {
-		r600_suspend_nontimer_queries(ctx);
-		ctx->nontimer_queries_suspended = true;
+	if (ctx->b.num_cs_dw_nontimer_queries_suspend) {
+		r600_suspend_nontimer_queries(&ctx->b);
+		ctx->b.nontimer_queries_suspended = true;
 	}
 
 	if (ctx->b.streamout.begin_emitted) {
@@ -421,8 +338,8 @@ void r600_begin_new_cs(struct r600_context *ctx)
 	}
 
 	/* resume queries */
-	if (ctx->nontimer_queries_suspended) {
-		r600_resume_nontimer_queries(ctx);
+	if (ctx->b.nontimer_queries_suspended) {
+		r600_resume_nontimer_queries(&ctx->b);
 	}
 
 	/* Re-emit the draw state. */
diff --git a/src/gallium/drivers/r600/r600_pipe.c b/src/gallium/drivers/r600/r600_pipe.c
index c8636947eb1..e217be5abae 100644
--- a/src/gallium/drivers/r600/r600_pipe.c
+++ b/src/gallium/drivers/r600/r600_pipe.c
@@ -78,10 +78,10 @@ static void r600_flush(struct pipe_context *ctx, unsigned flags)
 	rctx->b.rings.gfx.flushing = true;
 
 	/* Disable render condition.
*/ - if (rctx->current_render_cond) { - render_cond = rctx->current_render_cond; - render_cond_cond = rctx->current_render_cond_cond; - render_cond_mode = rctx->current_render_cond_mode; + if (rctx->b.current_render_cond) { + render_cond = rctx->b.current_render_cond; + render_cond_cond = rctx->b.current_render_cond_cond; + render_cond_mode = rctx->b.current_render_cond_mode; ctx->render_condition(ctx, NULL, FALSE, 0); } @@ -210,10 +210,7 @@ static struct pipe_context *r600_create_context(struct pipe_screen *screen, void rctx->screen = rscreen; rctx->keep_tiling_flags = rscreen->b.info.drm_minor >= 12; - LIST_INITHEAD(&rctx->active_nontimer_queries); - r600_init_blit_functions(rctx); - r600_init_query_functions(rctx); r600_init_context_resource_functions(rctx); if (rscreen->b.info.has_uvd) { @@ -231,7 +228,6 @@ static struct pipe_context *r600_create_context(struct pipe_screen *screen, void case R700: r600_init_state_functions(rctx); r600_init_atom_start_cs(rctx); - rctx->max_db = 4; rctx->custom_dsa_flush = r600_create_db_flush_dsa(rctx); rctx->custom_blend_resolve = rctx->b.chip_class == R700 ? r700_create_resolve_blend(rctx) : r600_create_resolve_blend(rctx); @@ -247,7 +243,6 @@ static struct pipe_context *r600_create_context(struct pipe_screen *screen, void evergreen_init_state_functions(rctx); evergreen_init_atom_start_cs(rctx); evergreen_init_atom_start_compute_cs(rctx); - rctx->max_db = 8; rctx->custom_dsa_flush = evergreen_create_db_flush_dsa(rctx); rctx->custom_blend_resolve = evergreen_create_resolve_blend(rctx); rctx->custom_blend_decompress = evergreen_create_decompress_blend(rctx); @@ -298,7 +293,7 @@ static struct pipe_context *r600_create_context(struct pipe_screen *screen, void rctx->blitter->draw_rectangle = r600_draw_rectangle; r600_begin_new_cs(rctx); - r600_get_backend_mask(rctx); /* this emits commands and must be last */ + r600_query_init_backend_mask(&rctx->b); /* this emits commands and must be last */ rctx->dummy_pixel_shader = util_make_fragment_cloneinput_shader(&rctx->b.b, 0, diff --git a/src/gallium/drivers/r600/r600_pipe.h b/src/gallium/drivers/r600/r600_pipe.h index 88b08604940..85bebc3bbf8 100644 --- a/src/gallium/drivers/r600/r600_pipe.h +++ b/src/gallium/drivers/r600/r600_pipe.h @@ -62,11 +62,6 @@ #define R600_BIG_ENDIAN 0 #endif -#define R600_QUERY_DRAW_CALLS (PIPE_QUERY_DRIVER_SPECIFIC + 0) -#define R600_QUERY_REQUESTED_VRAM (PIPE_QUERY_DRIVER_SPECIFIC + 1) -#define R600_QUERY_REQUESTED_GTT (PIPE_QUERY_DRIVER_SPECIFIC + 2) -#define R600_QUERY_BUFFER_WAIT_TIME (PIPE_QUERY_DRIVER_SPECIFIC + 3) - struct r600_context; struct r600_bytecode; struct r600_shader_key; @@ -364,34 +359,6 @@ struct r600_shader_state { struct r600_pipe_shader_selector *shader; }; -struct r600_query_buffer { - /* The buffer where query results are stored. */ - struct r600_resource *buf; - /* Offset of the next free result after current query data */ - unsigned results_end; - /* If a query buffer is full, a new buffer is created and the old one - * is put in here. When we calculate the result, we sum up the samples - * from all buffers. */ - struct r600_query_buffer *previous; -}; - -struct r600_query { - /* The query buffer and how many results are in it. */ - struct r600_query_buffer buffer; - /* The type of query */ - unsigned type; - /* Size of the result in memory for both begin_query and end_query, - * this can be one or two numbers, or it could even be a size of a structure. */ - unsigned result_size; - /* The number of dwords for begin_query or end_query. 
*/ - unsigned num_cs_dw; - /* linked list of queries */ - struct list_head list; - /* for custom non-GPU queries */ - uint64_t begin_result; - uint64_t end_result; -}; - struct r600_context { struct r600_common_context b; struct r600_screen *screen; @@ -404,8 +371,6 @@ struct r600_context { boolean keep_tiling_flags; unsigned default_ps_gprs, default_vs_gprs; unsigned r6xx_num_clause_temp_gprs; - unsigned backend_mask; - unsigned max_db; /* for OQ */ /* Miscellaneous state objects. */ void *custom_dsa_flush; @@ -478,25 +443,6 @@ struct r600_context { int last_primitive_type; /* Last primitive type used in draw_vbo. */ int last_start_instance; - /* Queries. */ - /* The list of active queries. Only one query of each type can be active. */ - int num_occlusion_queries; - int num_pipelinestat_queries; - /* Keep track of non-timer queries, because they should be suspended - * during context flushing. - * The timer queries (TIME_ELAPSED) shouldn't be suspended. */ - struct list_head active_nontimer_queries; - unsigned num_cs_dw_nontimer_queries_suspend; - /* If queries have been suspended. */ - bool nontimer_queries_suspended; - unsigned num_draw_calls; - - /* Render condition. */ - struct pipe_query *current_render_cond; - unsigned current_render_cond_mode; - boolean current_render_cond_cond; - boolean predicate_drawing; - void *sb_context; struct r600_isa *isa; }; @@ -589,12 +535,6 @@ void r600_decompress_color_textures(struct r600_context *rctx, /* r600_pipe.c */ const char * r600_llvm_gpu_string(enum radeon_family family); - -/* r600_query.c */ -void r600_init_query_functions(struct r600_context *rctx); -void r600_suspend_nontimer_queries(struct r600_context *ctx); -void r600_resume_nontimer_queries(struct r600_context *ctx); - /* r600_resource.c */ void r600_init_context_resource_functions(struct r600_context *r600); @@ -628,7 +568,6 @@ boolean r600_is_format_supported(struct pipe_screen *screen, void r600_update_db_shader_control(struct r600_context * rctx); /* r600_hw_context.c */ -void r600_get_backend_mask(struct r600_context *ctx); void r600_context_flush(struct r600_context *ctx, unsigned flags); void r600_begin_new_cs(struct r600_context *ctx); void r600_flush_emit(struct r600_context *ctx); diff --git a/src/gallium/drivers/r600/r600_query.c b/src/gallium/drivers/r600/r600_query.c deleted file mode 100644 index b9ff6a62155..00000000000 --- a/src/gallium/drivers/r600/r600_query.c +++ /dev/null @@ -1,745 +0,0 @@ -/* - * Copyright 2010 Jerome Glisse - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * on the rights to use, copy, modify, merge, publish, distribute, sub - * license, and/or sell copies of the Software, and to permit persons to whom - * the Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
IN NO EVENT SHALL
- * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- */
-#include "r600_pipe.h"
-#include "r600d.h"
-#include "util/u_memory.h"
-
-static bool r600_is_timer_query(unsigned type)
-{
-	return type == PIPE_QUERY_TIME_ELAPSED ||
-	       type == PIPE_QUERY_TIMESTAMP ||
-	       type == PIPE_QUERY_TIMESTAMP_DISJOINT;
-}
-
-static bool r600_query_needs_begin(unsigned type)
-{
-	return type != PIPE_QUERY_GPU_FINISHED &&
-	       type != PIPE_QUERY_TIMESTAMP;
-}
-
-static struct r600_resource *r600_new_query_buffer(struct r600_context *ctx, unsigned type)
-{
-	unsigned j, i, num_results, buf_size = 4096;
-	uint32_t *results;
-
-	/* Non-GPU queries. */
-	switch (type) {
-	case R600_QUERY_DRAW_CALLS:
-	case R600_QUERY_REQUESTED_VRAM:
-	case R600_QUERY_REQUESTED_GTT:
-	case R600_QUERY_BUFFER_WAIT_TIME:
-		return NULL;
-	}
-
-	/* Queries are normally read by the CPU after
-	 * being written by the gpu, hence staging is probably a good
-	 * usage pattern.
-	 */
-	struct r600_resource *buf = (struct r600_resource*)
-		pipe_buffer_create(&ctx->screen->b.b, PIPE_BIND_CUSTOM,
-				   PIPE_USAGE_STAGING, buf_size);
-
-	switch (type) {
-	case PIPE_QUERY_OCCLUSION_COUNTER:
-	case PIPE_QUERY_OCCLUSION_PREDICATE:
-		results = r600_buffer_map_sync_with_rings(&ctx->b, buf, PIPE_TRANSFER_WRITE);
-		memset(results, 0, buf_size);
-
-		/* Set top bits for unused backends. */
-		num_results = buf_size / (16 * ctx->max_db);
-		for (j = 0; j < num_results; j++) {
-			for (i = 0; i < ctx->max_db; i++) {
-				if (!(ctx->backend_mask & (1<<i))) {
-					results[(i * 4)+1] = 0x80000000;
-					results[(i * 4)+3] = 0x80000000;
-				}
-			}
-			results += 4 * ctx->max_db;
-		}
-		ctx->b.ws->buffer_unmap(buf->cs_buf);
-		break;
-	case PIPE_QUERY_TIME_ELAPSED:
-	case PIPE_QUERY_TIMESTAMP:
-		break;
-	case PIPE_QUERY_PRIMITIVES_EMITTED:
-	case PIPE_QUERY_PRIMITIVES_GENERATED:
-	case PIPE_QUERY_SO_STATISTICS:
-	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
-	case PIPE_QUERY_PIPELINE_STATISTICS:
-		results = r600_buffer_map_sync_with_rings(&ctx->b, buf, PIPE_TRANSFER_WRITE);
-		memset(results, 0, buf_size);
-		ctx->b.ws->buffer_unmap(buf->cs_buf);
-		break;
-	default:
-		assert(0);
-	}
-	return buf;
-}
-
-static void r600_update_occlusion_query_state(struct r600_context *rctx,
-					      unsigned type, int diff)
-{
-	if (type == PIPE_QUERY_OCCLUSION_COUNTER ||
-	    type == PIPE_QUERY_OCCLUSION_PREDICATE) {
-		bool enable;
-
-		rctx->num_occlusion_queries += diff;
-		assert(rctx->num_occlusion_queries >= 0);
-
-		enable = rctx->num_occlusion_queries != 0;
-
-		if (rctx->db_misc_state.occlusion_query_enabled != enable) {
-			rctx->db_misc_state.occlusion_query_enabled = enable;
-			rctx->db_misc_state.atom.dirty = true;
-		}
-	}
-}
-
-static void r600_emit_query_begin(struct r600_context *ctx, struct r600_query *query)
-{
-	struct radeon_winsys_cs *cs = ctx->b.rings.gfx.cs;
-	uint64_t va;
-
-	r600_update_occlusion_query_state(ctx, query->type, 1);
-	r600_need_cs_space(ctx, query->num_cs_dw * 2, TRUE);
-
-	/* Get a new query buffer if needed.
*/ - if (query->buffer.results_end + query->result_size > query->buffer.buf->b.b.width0) { - struct r600_query_buffer *qbuf = MALLOC_STRUCT(r600_query_buffer); - *qbuf = query->buffer; - query->buffer.buf = r600_new_query_buffer(ctx, query->type); - query->buffer.results_end = 0; - query->buffer.previous = qbuf; - } - - /* emit begin query */ - va = r600_resource_va(&ctx->screen->b.b, (void*)query->buffer.buf); - va += query->buffer.results_end; - - switch (query->type) { - case PIPE_QUERY_OCCLUSION_COUNTER: - case PIPE_QUERY_OCCLUSION_PREDICATE: - cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE, 2, 0); - cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1); - cs->buf[cs->cdw++] = va; - cs->buf[cs->cdw++] = (va >> 32UL) & 0xFF; - break; - case PIPE_QUERY_PRIMITIVES_EMITTED: - case PIPE_QUERY_PRIMITIVES_GENERATED: - case PIPE_QUERY_SO_STATISTICS: - case PIPE_QUERY_SO_OVERFLOW_PREDICATE: - cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE, 2, 0); - cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_SAMPLE_STREAMOUTSTATS) | EVENT_INDEX(3); - cs->buf[cs->cdw++] = va; - cs->buf[cs->cdw++] = (va >> 32UL) & 0xFF; - break; - case PIPE_QUERY_TIME_ELAPSED: - cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE_EOP, 4, 0); - cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_CACHE_FLUSH_AND_INV_TS_EVENT) | EVENT_INDEX(5); - cs->buf[cs->cdw++] = va; - cs->buf[cs->cdw++] = (3 << 29) | ((va >> 32UL) & 0xFF); - cs->buf[cs->cdw++] = 0; - cs->buf[cs->cdw++] = 0; - break; - case PIPE_QUERY_PIPELINE_STATISTICS: - if (!ctx->num_pipelinestat_queries) { - cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE, 0, 0); - cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_PIPELINESTAT_START) | EVENT_INDEX(0); - } - ctx->num_pipelinestat_queries++; - cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE, 2, 0); - cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_SAMPLE_PIPELINESTAT) | EVENT_INDEX(2); - cs->buf[cs->cdw++] = va; - cs->buf[cs->cdw++] = (va >> 32UL) & 0xFF; - break; - default: - assert(0); - } - cs->buf[cs->cdw++] = PKT3(PKT3_NOP, 0, 0); - cs->buf[cs->cdw++] = r600_context_bo_reloc(&ctx->b, &ctx->b.rings.gfx, query->buffer.buf, RADEON_USAGE_WRITE); - - if (!r600_is_timer_query(query->type)) { - ctx->num_cs_dw_nontimer_queries_suspend += query->num_cs_dw; - } -} - -static void r600_emit_query_end(struct r600_context *ctx, struct r600_query *query) -{ - struct radeon_winsys_cs *cs = ctx->b.rings.gfx.cs; - uint64_t va; - - /* The queries which need begin already called this in begin_query. 
*/ - if (!r600_query_needs_begin(query->type)) { - r600_need_cs_space(ctx, query->num_cs_dw, FALSE); - } - - va = r600_resource_va(&ctx->screen->b.b, (void*)query->buffer.buf); - /* emit end query */ - switch (query->type) { - case PIPE_QUERY_OCCLUSION_COUNTER: - case PIPE_QUERY_OCCLUSION_PREDICATE: - va += query->buffer.results_end + 8; - cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE, 2, 0); - cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1); - cs->buf[cs->cdw++] = va; - cs->buf[cs->cdw++] = (va >> 32UL) & 0xFF; - break; - case PIPE_QUERY_PRIMITIVES_EMITTED: - case PIPE_QUERY_PRIMITIVES_GENERATED: - case PIPE_QUERY_SO_STATISTICS: - case PIPE_QUERY_SO_OVERFLOW_PREDICATE: - va += query->buffer.results_end + query->result_size/2; - cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE, 2, 0); - cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_SAMPLE_STREAMOUTSTATS) | EVENT_INDEX(3); - cs->buf[cs->cdw++] = va; - cs->buf[cs->cdw++] = (va >> 32UL) & 0xFF; - break; - case PIPE_QUERY_TIME_ELAPSED: - va += query->buffer.results_end + query->result_size/2; - /* fall through */ - case PIPE_QUERY_TIMESTAMP: - cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE_EOP, 4, 0); - cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_CACHE_FLUSH_AND_INV_TS_EVENT) | EVENT_INDEX(5); - cs->buf[cs->cdw++] = va; - cs->buf[cs->cdw++] = (3 << 29) | ((va >> 32UL) & 0xFF); - cs->buf[cs->cdw++] = 0; - cs->buf[cs->cdw++] = 0; - break; - case PIPE_QUERY_PIPELINE_STATISTICS: - assert(ctx->num_pipelinestat_queries > 0); - ctx->num_pipelinestat_queries--; - if (!ctx->num_pipelinestat_queries) { - cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE, 0, 0); - cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_PIPELINESTAT_STOP) | EVENT_INDEX(0); - } - va += query->buffer.results_end + query->result_size/2; - cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE, 2, 0); - cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_SAMPLE_PIPELINESTAT) | EVENT_INDEX(2); - cs->buf[cs->cdw++] = va; - cs->buf[cs->cdw++] = (va >> 32UL) & 0xFF; - break; - default: - assert(0); - } - cs->buf[cs->cdw++] = PKT3(PKT3_NOP, 0, 0); - cs->buf[cs->cdw++] = r600_context_bo_reloc(&ctx->b, &ctx->b.rings.gfx, query->buffer.buf, RADEON_USAGE_WRITE); - - query->buffer.results_end += query->result_size; - - if (r600_query_needs_begin(query->type)) { - if (!r600_is_timer_query(query->type)) { - ctx->num_cs_dw_nontimer_queries_suspend -= query->num_cs_dw; - } - } - - r600_update_occlusion_query_state(ctx, query->type, -1); -} - -static void r600_emit_query_predication(struct r600_context *ctx, struct r600_query *query, - int operation, bool flag_wait) -{ - struct radeon_winsys_cs *cs = ctx->b.rings.gfx.cs; - - if (operation == PREDICATION_OP_CLEAR) { - r600_need_cs_space(ctx, 3, FALSE); - - cs->buf[cs->cdw++] = PKT3(PKT3_SET_PREDICATION, 1, 0); - cs->buf[cs->cdw++] = 0; - cs->buf[cs->cdw++] = PRED_OP(PREDICATION_OP_CLEAR); - } else { - struct r600_query_buffer *qbuf; - unsigned count; - uint32_t op; - - /* Find how many results there are. */ - count = 0; - for (qbuf = &query->buffer; qbuf; qbuf = qbuf->previous) { - count += qbuf->results_end / query->result_size; - } - - r600_need_cs_space(ctx, 5 * count, TRUE); - - op = PRED_OP(operation) | PREDICATION_DRAW_VISIBLE | - (flag_wait ? 
PREDICATION_HINT_WAIT : PREDICATION_HINT_NOWAIT_DRAW); - - /* emit predicate packets for all data blocks */ - for (qbuf = &query->buffer; qbuf; qbuf = qbuf->previous) { - unsigned results_base = 0; - uint64_t va = r600_resource_va(&ctx->screen->b.b, &qbuf->buf->b.b); - - while (results_base < qbuf->results_end) { - cs->buf[cs->cdw++] = PKT3(PKT3_SET_PREDICATION, 1, 0); - cs->buf[cs->cdw++] = (va + results_base) & 0xFFFFFFFFUL; - cs->buf[cs->cdw++] = op | (((va + results_base) >> 32UL) & 0xFF); - cs->buf[cs->cdw++] = PKT3(PKT3_NOP, 0, 0); - cs->buf[cs->cdw++] = r600_context_bo_reloc(&ctx->b, &ctx->b.rings.gfx, qbuf->buf, RADEON_USAGE_READ); - results_base += query->result_size; - - /* set CONTINUE bit for all packets except the first */ - op |= PREDICATION_CONTINUE; - } - } while (qbuf); - } -} - -static struct pipe_query *r600_create_query(struct pipe_context *ctx, unsigned query_type) -{ - struct r600_context *rctx = (struct r600_context *)ctx; - struct r600_query *query; - bool skip_allocation = false; - - query = CALLOC_STRUCT(r600_query); - if (query == NULL) - return NULL; - - query->type = query_type; - - switch (query_type) { - case PIPE_QUERY_OCCLUSION_COUNTER: - case PIPE_QUERY_OCCLUSION_PREDICATE: - query->result_size = 16 * rctx->max_db; - query->num_cs_dw = 6; - break; - case PIPE_QUERY_TIME_ELAPSED: - query->result_size = 16; - query->num_cs_dw = 8; - break; - case PIPE_QUERY_TIMESTAMP: - query->result_size = 8; - query->num_cs_dw = 8; - break; - case PIPE_QUERY_PRIMITIVES_EMITTED: - case PIPE_QUERY_PRIMITIVES_GENERATED: - case PIPE_QUERY_SO_STATISTICS: - case PIPE_QUERY_SO_OVERFLOW_PREDICATE: - /* NumPrimitivesWritten, PrimitiveStorageNeeded. */ - query->result_size = 32; - query->num_cs_dw = 6; - break; - case PIPE_QUERY_PIPELINE_STATISTICS: - /* 11 values on EG, 8 on R600. */ - query->result_size = (rctx->b.chip_class >= EVERGREEN ? 11 : 8) * 16; - query->num_cs_dw = 8; - break; - /* Non-GPU queries. */ - case R600_QUERY_DRAW_CALLS: - case R600_QUERY_REQUESTED_VRAM: - case R600_QUERY_REQUESTED_GTT: - case R600_QUERY_BUFFER_WAIT_TIME: - skip_allocation = true; - break; - default: - assert(0); - FREE(query); - return NULL; - } - - if (!skip_allocation) { - query->buffer.buf = r600_new_query_buffer(rctx, query_type); - if (!query->buffer.buf) { - FREE(query); - return NULL; - } - } - return (struct pipe_query*)query; -} - -static void r600_destroy_query(struct pipe_context *ctx, struct pipe_query *query) -{ - struct r600_query *rquery = (struct r600_query*)query; - struct r600_query_buffer *prev = rquery->buffer.previous; - - /* Release all query buffers. */ - while (prev) { - struct r600_query_buffer *qbuf = prev; - prev = prev->previous; - pipe_resource_reference((struct pipe_resource**)&qbuf->buf, NULL); - FREE(qbuf); - } - - pipe_resource_reference((struct pipe_resource**)&rquery->buffer.buf, NULL); - FREE(query); -} - -static void r600_begin_query(struct pipe_context *ctx, struct pipe_query *query) -{ - struct r600_context *rctx = (struct r600_context *)ctx; - struct r600_query *rquery = (struct r600_query *)query; - struct r600_query_buffer *prev = rquery->buffer.previous; - - if (!r600_query_needs_begin(rquery->type)) { - assert(0); - return; - } - - /* Non-GPU queries. 
*/ - switch (rquery->type) { - case R600_QUERY_DRAW_CALLS: - rquery->begin_result = rctx->num_draw_calls; - return; - case R600_QUERY_REQUESTED_VRAM: - case R600_QUERY_REQUESTED_GTT: - rquery->begin_result = 0; - return; - case R600_QUERY_BUFFER_WAIT_TIME: - rquery->begin_result = rctx->b.ws->query_value(rctx->b.ws, RADEON_BUFFER_WAIT_TIME_NS); - return; - } - - /* Discard the old query buffers. */ - while (prev) { - struct r600_query_buffer *qbuf = prev; - prev = prev->previous; - pipe_resource_reference((struct pipe_resource**)&qbuf->buf, NULL); - FREE(qbuf); - } - - /* Obtain a new buffer if the current one can't be mapped without a stall. */ - if (r600_rings_is_buffer_referenced(&rctx->b, rquery->buffer.buf->cs_buf, RADEON_USAGE_READWRITE) || - rctx->b.ws->buffer_is_busy(rquery->buffer.buf->buf, RADEON_USAGE_READWRITE)) { - pipe_resource_reference((struct pipe_resource**)&rquery->buffer.buf, NULL); - rquery->buffer.buf = r600_new_query_buffer(rctx, rquery->type); - } - - rquery->buffer.results_end = 0; - rquery->buffer.previous = NULL; - - r600_emit_query_begin(rctx, rquery); - - if (!r600_is_timer_query(rquery->type)) { - LIST_ADDTAIL(&rquery->list, &rctx->active_nontimer_queries); - } -} - -static void r600_end_query(struct pipe_context *ctx, struct pipe_query *query) -{ - struct r600_context *rctx = (struct r600_context *)ctx; - struct r600_query *rquery = (struct r600_query *)query; - - /* Non-GPU queries. */ - switch (rquery->type) { - case R600_QUERY_DRAW_CALLS: - rquery->end_result = rctx->num_draw_calls; - return; - case R600_QUERY_REQUESTED_VRAM: - rquery->end_result = rctx->b.ws->query_value(rctx->b.ws, RADEON_REQUESTED_VRAM_MEMORY); - return; - case R600_QUERY_REQUESTED_GTT: - rquery->end_result = rctx->b.ws->query_value(rctx->b.ws, RADEON_REQUESTED_GTT_MEMORY); - return; - case R600_QUERY_BUFFER_WAIT_TIME: - rquery->end_result = rctx->b.ws->query_value(rctx->b.ws, RADEON_BUFFER_WAIT_TIME_NS); - return; - } - - r600_emit_query_end(rctx, rquery); - - if (r600_query_needs_begin(rquery->type) && !r600_is_timer_query(rquery->type)) { - LIST_DELINIT(&rquery->list); - } -} - -static unsigned r600_query_read_result(char *map, unsigned start_index, unsigned end_index, - bool test_status_bit) -{ - uint32_t *current_result = (uint32_t*)map; - uint64_t start, end; - - start = (uint64_t)current_result[start_index] | - (uint64_t)current_result[start_index+1] << 32; - end = (uint64_t)current_result[end_index] | - (uint64_t)current_result[end_index+1] << 32; - - if (!test_status_bit || - ((start & 0x8000000000000000UL) && (end & 0x8000000000000000UL))) { - return end - start; - } - return 0; -} - -static boolean r600_get_query_buffer_result(struct r600_context *ctx, - struct r600_query *query, - struct r600_query_buffer *qbuf, - boolean wait, - union pipe_query_result *result) -{ - unsigned results_base = 0; - char *map; - - /* Non-GPU queries. */ - switch (query->type) { - case R600_QUERY_DRAW_CALLS: - case R600_QUERY_REQUESTED_VRAM: - case R600_QUERY_REQUESTED_GTT: - case R600_QUERY_BUFFER_WAIT_TIME: - result->u64 = query->end_result - query->begin_result; - return TRUE; - } - - map = r600_buffer_map_sync_with_rings(&ctx->b, qbuf->buf, - PIPE_TRANSFER_READ | - (wait ? 
0 : PIPE_TRANSFER_DONTBLOCK)); - if (!map) - return FALSE; - - /* count all results across all data blocks */ - switch (query->type) { - case PIPE_QUERY_OCCLUSION_COUNTER: - while (results_base != qbuf->results_end) { - result->u64 += - r600_query_read_result(map + results_base, 0, 2, true); - results_base += 16; - } - break; - case PIPE_QUERY_OCCLUSION_PREDICATE: - while (results_base != qbuf->results_end) { - result->b = result->b || - r600_query_read_result(map + results_base, 0, 2, true) != 0; - results_base += 16; - } - break; - case PIPE_QUERY_TIME_ELAPSED: - while (results_base != qbuf->results_end) { - result->u64 += - r600_query_read_result(map + results_base, 0, 2, false); - results_base += query->result_size; - } - break; - case PIPE_QUERY_TIMESTAMP: - { - uint32_t *current_result = (uint32_t*)map; - result->u64 = (uint64_t)current_result[0] | - (uint64_t)current_result[1] << 32; - break; - } - case PIPE_QUERY_PRIMITIVES_EMITTED: - /* SAMPLE_STREAMOUTSTATS stores this structure: - * { - * u64 NumPrimitivesWritten; - * u64 PrimitiveStorageNeeded; - * } - * We only need NumPrimitivesWritten here. */ - while (results_base != qbuf->results_end) { - result->u64 += - r600_query_read_result(map + results_base, 2, 6, true); - results_base += query->result_size; - } - break; - case PIPE_QUERY_PRIMITIVES_GENERATED: - /* Here we read PrimitiveStorageNeeded. */ - while (results_base != qbuf->results_end) { - result->u64 += - r600_query_read_result(map + results_base, 0, 4, true); - results_base += query->result_size; - } - break; - case PIPE_QUERY_SO_STATISTICS: - while (results_base != qbuf->results_end) { - result->so_statistics.num_primitives_written += - r600_query_read_result(map + results_base, 2, 6, true); - result->so_statistics.primitives_storage_needed += - r600_query_read_result(map + results_base, 0, 4, true); - results_base += query->result_size; - } - break; - case PIPE_QUERY_SO_OVERFLOW_PREDICATE: - while (results_base != qbuf->results_end) { - result->b = result->b || - r600_query_read_result(map + results_base, 2, 6, true) != - r600_query_read_result(map + results_base, 0, 4, true); - results_base += query->result_size; - } - break; - case PIPE_QUERY_PIPELINE_STATISTICS: - if (ctx->b.chip_class >= EVERGREEN) { - while (results_base != qbuf->results_end) { - result->pipeline_statistics.ps_invocations += - r600_query_read_result(map + results_base, 0, 22, false); - result->pipeline_statistics.c_primitives += - r600_query_read_result(map + results_base, 2, 24, false); - result->pipeline_statistics.c_invocations += - r600_query_read_result(map + results_base, 4, 26, false); - result->pipeline_statistics.vs_invocations += - r600_query_read_result(map + results_base, 6, 28, false); - result->pipeline_statistics.gs_invocations += - r600_query_read_result(map + results_base, 8, 30, false); - result->pipeline_statistics.gs_primitives += - r600_query_read_result(map + results_base, 10, 32, false); - result->pipeline_statistics.ia_primitives += - r600_query_read_result(map + results_base, 12, 34, false); - result->pipeline_statistics.ia_vertices += - r600_query_read_result(map + results_base, 14, 36, false); - result->pipeline_statistics.hs_invocations += - r600_query_read_result(map + results_base, 16, 38, false); - result->pipeline_statistics.ds_invocations += - r600_query_read_result(map + results_base, 18, 40, false); - result->pipeline_statistics.cs_invocations += - r600_query_read_result(map + results_base, 20, 42, false); - results_base += query->result_size; - } - } else { - 
while (results_base != qbuf->results_end) { - result->pipeline_statistics.ps_invocations += - r600_query_read_result(map + results_base, 0, 16, false); - result->pipeline_statistics.c_primitives += - r600_query_read_result(map + results_base, 2, 18, false); - result->pipeline_statistics.c_invocations += - r600_query_read_result(map + results_base, 4, 20, false); - result->pipeline_statistics.vs_invocations += - r600_query_read_result(map + results_base, 6, 22, false); - result->pipeline_statistics.gs_invocations += - r600_query_read_result(map + results_base, 8, 24, false); - result->pipeline_statistics.gs_primitives += - r600_query_read_result(map + results_base, 10, 26, false); - result->pipeline_statistics.ia_primitives += - r600_query_read_result(map + results_base, 12, 28, false); - result->pipeline_statistics.ia_vertices += - r600_query_read_result(map + results_base, 14, 30, false); - results_base += query->result_size; - } - } -#if 0 /* for testing */ - printf("Pipeline stats: IA verts=%llu, IA prims=%llu, VS=%llu, HS=%llu, " - "DS=%llu, GS=%llu, GS prims=%llu, Clipper=%llu, " - "Clipper prims=%llu, PS=%llu, CS=%llu\n", - result->pipeline_statistics.ia_vertices, - result->pipeline_statistics.ia_primitives, - result->pipeline_statistics.vs_invocations, - result->pipeline_statistics.hs_invocations, - result->pipeline_statistics.ds_invocations, - result->pipeline_statistics.gs_invocations, - result->pipeline_statistics.gs_primitives, - result->pipeline_statistics.c_invocations, - result->pipeline_statistics.c_primitives, - result->pipeline_statistics.ps_invocations, - result->pipeline_statistics.cs_invocations); -#endif - break; - default: - assert(0); - } - - ctx->b.ws->buffer_unmap(qbuf->buf->cs_buf); - return TRUE; -} - -static boolean r600_get_query_result(struct pipe_context *ctx, - struct pipe_query *query, - boolean wait, union pipe_query_result *result) -{ - struct r600_context *rctx = (struct r600_context *)ctx; - struct r600_query *rquery = (struct r600_query *)query; - struct r600_query_buffer *qbuf; - - util_query_clear_result(result, rquery->type); - - for (qbuf = &rquery->buffer; qbuf; qbuf = qbuf->previous) { - if (!r600_get_query_buffer_result(rctx, rquery, qbuf, wait, result)) { - return FALSE; - } - } - - /* Convert the time to expected units. 
*/ - if (rquery->type == PIPE_QUERY_TIME_ELAPSED || - rquery->type == PIPE_QUERY_TIMESTAMP) { - result->u64 = (1000000 * result->u64) / rctx->screen->b.info.r600_clock_crystal_freq; - } - return TRUE; -} - -static void r600_render_condition(struct pipe_context *ctx, - struct pipe_query *query, - boolean condition, - uint mode) -{ - struct r600_context *rctx = (struct r600_context *)ctx; - struct r600_query *rquery = (struct r600_query *)query; - bool wait_flag = false; - - rctx->current_render_cond = query; - rctx->current_render_cond_cond = condition; - rctx->current_render_cond_mode = mode; - - if (query == NULL) { - if (rctx->predicate_drawing) { - rctx->predicate_drawing = false; - r600_emit_query_predication(rctx, NULL, PREDICATION_OP_CLEAR, false); - } - return; - } - - if (mode == PIPE_RENDER_COND_WAIT || - mode == PIPE_RENDER_COND_BY_REGION_WAIT) { - wait_flag = true; - } - - rctx->predicate_drawing = true; - - switch (rquery->type) { - case PIPE_QUERY_OCCLUSION_COUNTER: - case PIPE_QUERY_OCCLUSION_PREDICATE: - r600_emit_query_predication(rctx, rquery, PREDICATION_OP_ZPASS, wait_flag); - break; - case PIPE_QUERY_PRIMITIVES_EMITTED: - case PIPE_QUERY_PRIMITIVES_GENERATED: - case PIPE_QUERY_SO_STATISTICS: - case PIPE_QUERY_SO_OVERFLOW_PREDICATE: - r600_emit_query_predication(rctx, rquery, PREDICATION_OP_PRIMCOUNT, wait_flag); - break; - default: - assert(0); - } -} - -void r600_suspend_nontimer_queries(struct r600_context *ctx) -{ - struct r600_query *query; - - LIST_FOR_EACH_ENTRY(query, &ctx->active_nontimer_queries, list) { - r600_emit_query_end(ctx, query); - } - assert(ctx->num_cs_dw_nontimer_queries_suspend == 0); -} - -void r600_resume_nontimer_queries(struct r600_context *ctx) -{ - struct r600_query *query; - - assert(ctx->num_cs_dw_nontimer_queries_suspend == 0); - - LIST_FOR_EACH_ENTRY(query, &ctx->active_nontimer_queries, list) { - r600_emit_query_begin(ctx, query); - } -} - -void r600_init_query_functions(struct r600_context *rctx) -{ - rctx->b.b.create_query = r600_create_query; - rctx->b.b.destroy_query = r600_destroy_query; - rctx->b.b.begin_query = r600_begin_query; - rctx->b.b.end_query = r600_end_query; - rctx->b.b.get_query_result = r600_get_query_result; - - if (rctx->screen->b.info.r600_num_backends > 0) - rctx->b.b.render_condition = r600_render_condition; -} diff --git a/src/gallium/drivers/r600/r600_state_common.c b/src/gallium/drivers/r600/r600_state_common.c index 3dc79910247..3a6f16753f4 100644 --- a/src/gallium/drivers/r600/r600_state_common.c +++ b/src/gallium/drivers/r600/r600_state_common.c @@ -1339,10 +1339,10 @@ static void r600_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info } /* Draw packets. */ - cs->buf[cs->cdw++] = PKT3(PKT3_NUM_INSTANCES, 0, rctx->predicate_drawing); + cs->buf[cs->cdw++] = PKT3(PKT3_NUM_INSTANCES, 0, rctx->b.predicate_drawing); cs->buf[cs->cdw++] = info.instance_count; if (info.indexed) { - cs->buf[cs->cdw++] = PKT3(PKT3_INDEX_TYPE, 0, rctx->predicate_drawing); + cs->buf[cs->cdw++] = PKT3(PKT3_INDEX_TYPE, 0, rctx->b.predicate_drawing); cs->buf[cs->cdw++] = ib.index_size == 4 ? (VGT_INDEX_32 | (R600_BIG_ENDIAN ? VGT_DMA_SWAP_32_BIT : 0)) : (VGT_INDEX_16 | (R600_BIG_ENDIAN ? 
VGT_DMA_SWAP_16_BIT : 0)); @@ -1350,19 +1350,19 @@ static void r600_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info if (ib.user_buffer) { unsigned size_bytes = info.count*ib.index_size; unsigned size_dw = align(size_bytes, 4) / 4; - cs->buf[cs->cdw++] = PKT3(PKT3_DRAW_INDEX_IMMD, 1 + size_dw, rctx->predicate_drawing); + cs->buf[cs->cdw++] = PKT3(PKT3_DRAW_INDEX_IMMD, 1 + size_dw, rctx->b.predicate_drawing); cs->buf[cs->cdw++] = info.count; cs->buf[cs->cdw++] = V_0287F0_DI_SRC_SEL_IMMEDIATE; memcpy(cs->buf+cs->cdw, ib.user_buffer, size_bytes); cs->cdw += size_dw; } else { uint64_t va = r600_resource_va(ctx->screen, ib.buffer) + ib.offset; - cs->buf[cs->cdw++] = PKT3(PKT3_DRAW_INDEX, 3, rctx->predicate_drawing); + cs->buf[cs->cdw++] = PKT3(PKT3_DRAW_INDEX, 3, rctx->b.predicate_drawing); cs->buf[cs->cdw++] = va; cs->buf[cs->cdw++] = (va >> 32UL) & 0xFF; cs->buf[cs->cdw++] = info.count; cs->buf[cs->cdw++] = V_0287F0_DI_SRC_SEL_DMA; - cs->buf[cs->cdw++] = PKT3(PKT3_NOP, 0, rctx->predicate_drawing); + cs->buf[cs->cdw++] = PKT3(PKT3_NOP, 0, rctx->b.predicate_drawing); cs->buf[cs->cdw++] = r600_context_bo_reloc(&rctx->b, &rctx->b.rings.gfx, (struct r600_resource*)ib.buffer, RADEON_USAGE_READ); } } else { @@ -1383,7 +1383,7 @@ static void r600_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info cs->buf[cs->cdw++] = r600_context_bo_reloc(&rctx->b, &rctx->b.rings.gfx, t->buf_filled_size, RADEON_USAGE_READ); } - cs->buf[cs->cdw++] = PKT3(PKT3_DRAW_INDEX_AUTO, 1, rctx->predicate_drawing); + cs->buf[cs->cdw++] = PKT3(PKT3_DRAW_INDEX_AUTO, 1, rctx->b.predicate_drawing); cs->buf[cs->cdw++] = info.count; cs->buf[cs->cdw++] = V_0287F0_DI_SRC_SEL_AUTO_INDEX | (info.count_from_stream_output ? S_0287F0_USE_OPAQUE(1) : 0); @@ -1416,7 +1416,7 @@ static void r600_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info } pipe_resource_reference(&ib.buffer, NULL); - rctx->num_draw_calls++; + rctx->b.num_draw_calls++; } void r600_draw_rectangle(struct blitter_context *blitter, @@ -2127,6 +2127,22 @@ static void r600_invalidate_buffer(struct pipe_context *ctx, struct pipe_resourc /* XXX TODO: texture buffer objects */ } +static void r600_set_occlusion_query_state(struct pipe_context *ctx, bool enable) +{ + struct r600_context *rctx = (struct r600_context*)ctx; + + if (rctx->db_misc_state.occlusion_query_enabled != enable) { + rctx->db_misc_state.occlusion_query_enabled = enable; + rctx->db_misc_state.atom.dirty = true; + } +} + +static void r600_need_gfx_cs_space(struct pipe_context *ctx, unsigned num_dw, + bool include_draw_vbo) +{ + r600_need_cs_space((struct r600_context*)ctx, num_dw, include_draw_vbo); +} + /* keep this at the end of this file, please */ void r600_init_common_state_functions(struct r600_context *rctx) { @@ -2163,6 +2179,8 @@ void r600_init_common_state_functions(struct r600_context *rctx) rctx->b.b.surface_destroy = r600_surface_destroy; rctx->b.b.draw_vbo = r600_draw_vbo; rctx->b.invalidate_buffer = r600_invalidate_buffer; + rctx->b.set_occlusion_query_state = r600_set_occlusion_query_state; + rctx->b.need_gfx_cs_space = r600_need_gfx_cs_space; } void r600_trace_emit(struct r600_context *rctx) diff --git a/src/gallium/drivers/radeon/Makefile.sources b/src/gallium/drivers/radeon/Makefile.sources index 540ac05b9e8..7c028388e35 100644 --- a/src/gallium/drivers/radeon/Makefile.sources +++ b/src/gallium/drivers/radeon/Makefile.sources @@ -1,6 +1,7 @@ C_SOURCES := \ r600_buffer_common.c \ r600_pipe_common.c \ + r600_query.c \ r600_streamout.c \ r600_texture.c \ 
radeon_uvd.c diff --git a/src/gallium/drivers/radeon/r600_pipe_common.c b/src/gallium/drivers/radeon/r600_pipe_common.c index 07bf383f74d..ad6dd253a1d 100644 --- a/src/gallium/drivers/radeon/r600_pipe_common.c +++ b/src/gallium/drivers/radeon/r600_pipe_common.c @@ -228,11 +228,14 @@ bool r600_common_context_init(struct r600_common_context *rctx, sizeof(struct r600_transfer), 64, UTIL_SLAB_SINGLETHREADED); + rctx->screen = rscreen; rctx->ws = rscreen->ws; rctx->family = rscreen->family; rctx->chip_class = rscreen->chip_class; + rctx->max_db = rscreen->chip_class >= EVERGREEN ? 8 : 4; r600_streamout_init(rctx); + r600_query_init(rctx); rctx->allocator_so_filled_size = u_suballocator_create(&rctx->b, 4096, 4, 0, PIPE_USAGE_STATIC, TRUE); diff --git a/src/gallium/drivers/radeon/r600_pipe_common.h b/src/gallium/drivers/radeon/r600_pipe_common.h index 6ee20f6fc77..48674c6e625 100644 --- a/src/gallium/drivers/radeon/r600_pipe_common.h +++ b/src/gallium/drivers/radeon/r600_pipe_common.h @@ -36,6 +36,7 @@ #include "../../winsys/radeon/drm/radeon_winsys.h" +#include "util/u_double_list.h" #include "util/u_range.h" #include "util/u_slab.h" #include "util/u_suballoc.h" @@ -45,6 +46,11 @@ #define R600_RESOURCE_FLAG_FLUSHED_DEPTH (PIPE_RESOURCE_FLAG_DRV_PRIV << 1) #define R600_RESOURCE_FLAG_FORCE_TILING (PIPE_RESOURCE_FLAG_DRV_PRIV << 2) +#define R600_QUERY_DRAW_CALLS (PIPE_QUERY_DRIVER_SPECIFIC + 0) +#define R600_QUERY_REQUESTED_VRAM (PIPE_QUERY_DRIVER_SPECIFIC + 1) +#define R600_QUERY_REQUESTED_GTT (PIPE_QUERY_DRIVER_SPECIFIC + 2) +#define R600_QUERY_BUFFER_WAIT_TIME (PIPE_QUERY_DRIVER_SPECIFIC + 3) + /* read caches */ #define R600_CONTEXT_INV_VERTEX_CACHE (1 << 0) #define R600_CONTEXT_INV_TEX_CACHE (1 << 1) @@ -225,6 +231,7 @@ struct r600_rings { struct r600_common_context { struct pipe_context b; /* base class */ + struct r600_common_screen *screen; struct radeon_winsys *ws; enum radeon_family family; enum chip_class chip_class; @@ -244,6 +251,29 @@ struct r600_common_context { /* Additional context states. */ unsigned flags; /* flush flags */ + /* Queries. */ + /* The list of active queries. Only one query of each type can be active. */ + int num_occlusion_queries; + int num_pipelinestat_queries; + /* Keep track of non-timer queries, because they should be suspended + * during context flushing. + * The timer queries (TIME_ELAPSED) shouldn't be suspended. */ + struct list_head active_nontimer_queries; + unsigned num_cs_dw_nontimer_queries_suspend; + /* If queries have been suspended. */ + bool nontimer_queries_suspended; + /* Additional hardware info. */ + unsigned backend_mask; + unsigned max_db; /* for OQ */ + /* Misc stats. */ + unsigned num_draw_calls; + + /* Render condition. */ + struct pipe_query *current_render_cond; + unsigned current_render_cond_mode; + boolean current_render_cond_cond; + boolean predicate_drawing; + /* Copy one resource to another using async DMA. * False is returned if the copy couldn't be done. */ boolean (*dma_copy)(struct pipe_context *ctx, @@ -267,6 +297,13 @@ struct r600_common_context { /* Reallocate the buffer and update all resource bindings where * the buffer is bound, including all resource descriptors. */ void (*invalidate_buffer)(struct pipe_context *ctx, struct pipe_resource *buf); + + /* Enable or disable occlusion queries. */ + void (*set_occlusion_query_state)(struct pipe_context *ctx, bool enable); + + /* This ensures there is enough space in the command stream. 
*/ + void (*need_gfx_cs_space)(struct pipe_context *ctx, unsigned num_dw, + bool include_draw_vbo); }; /* r600_buffer.c */ @@ -297,6 +334,12 @@ bool r600_can_dump_shader(struct r600_common_screen *rscreen, void r600_screen_clear_buffer(struct r600_common_screen *rscreen, struct pipe_resource *dst, unsigned offset, unsigned size, unsigned value); +/* r600_query.c */ +void r600_query_init(struct r600_common_context *rctx); +void r600_suspend_nontimer_queries(struct r600_common_context *ctx); +void r600_resume_nontimer_queries(struct r600_common_context *ctx); +void r600_query_init_backend_mask(struct r600_common_context *ctx); + /* r600_streamout.c */ void r600_streamout_buffers_dirty(struct r600_common_context *rctx); void r600_set_streamout_targets(struct pipe_context *ctx, diff --git a/src/gallium/drivers/radeon/r600_query.c b/src/gallium/drivers/radeon/r600_query.c new file mode 100644 index 00000000000..ea9ad11e37f --- /dev/null +++ b/src/gallium/drivers/radeon/r600_query.c @@ -0,0 +1,863 @@ +/* + * Copyright 2010 Jerome Glisse + * Copyright 2014 Marek Olšák + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * on the rights to use, copy, modify, merge, publish, distribute, sub + * license, and/or sell copies of the Software, and to permit persons to whom + * the Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + */ + +#include "r600_cs.h" +#include "util/u_memory.h" + + +struct r600_query_buffer { + /* The buffer where query results are stored. */ + struct r600_resource *buf; + /* Offset of the next free result after current query data */ + unsigned results_end; + /* If a query buffer is full, a new buffer is created and the old one + * is put in here. When we calculate the result, we sum up the samples + * from all buffers. */ + struct r600_query_buffer *previous; +}; + +struct r600_query { + /* The query buffer and how many results are in it. */ + struct r600_query_buffer buffer; + /* The type of query */ + unsigned type; + /* Size of the result in memory for both begin_query and end_query, + * this can be one or two numbers, or it could even be a size of a structure. */ + unsigned result_size; + /* The number of dwords for begin_query or end_query. 
*/
+	unsigned num_cs_dw;
+	/* linked list of queries */
+	struct list_head list;
+	/* for custom non-GPU queries */
+	uint64_t begin_result;
+	uint64_t end_result;
+};
+
+
+static bool r600_is_timer_query(unsigned type)
+{
+	return type == PIPE_QUERY_TIME_ELAPSED ||
+	       type == PIPE_QUERY_TIMESTAMP ||
+	       type == PIPE_QUERY_TIMESTAMP_DISJOINT;
+}
+
+static bool r600_query_needs_begin(unsigned type)
+{
+	return type != PIPE_QUERY_GPU_FINISHED &&
+	       type != PIPE_QUERY_TIMESTAMP;
+}
+
+static struct r600_resource *r600_new_query_buffer(struct r600_common_context *ctx, unsigned type)
+{
+	unsigned j, i, num_results, buf_size = 4096;
+	uint32_t *results;
+
+	/* Non-GPU queries. */
+	switch (type) {
+	case R600_QUERY_DRAW_CALLS:
+	case R600_QUERY_REQUESTED_VRAM:
+	case R600_QUERY_REQUESTED_GTT:
+	case R600_QUERY_BUFFER_WAIT_TIME:
+		return NULL;
+	}
+
+	/* Queries are normally read by the CPU after
+	 * being written by the gpu, hence staging is probably a good
+	 * usage pattern.
+	 */
+	struct r600_resource *buf = (struct r600_resource*)
+		pipe_buffer_create(ctx->b.screen, PIPE_BIND_CUSTOM,
+				   PIPE_USAGE_STAGING, buf_size);
+
+	switch (type) {
+	case PIPE_QUERY_OCCLUSION_COUNTER:
+	case PIPE_QUERY_OCCLUSION_PREDICATE:
+		results = r600_buffer_map_sync_with_rings(ctx, buf, PIPE_TRANSFER_WRITE);
+		memset(results, 0, buf_size);
+
+		/* Set top bits for unused backends. */
+		num_results = buf_size / (16 * ctx->max_db);
+		for (j = 0; j < num_results; j++) {
+			for (i = 0; i < ctx->max_db; i++) {
+				if (!(ctx->backend_mask & (1<<i))) {
+					results[(i * 4)+1] = 0x80000000;
+					results[(i * 4)+3] = 0x80000000;
+				}
+			}
+			results += 4 * ctx->max_db;
+		}
+		ctx->ws->buffer_unmap(buf->cs_buf);
+		break;
+	case PIPE_QUERY_TIME_ELAPSED:
+	case PIPE_QUERY_TIMESTAMP:
+		break;
+	case PIPE_QUERY_PRIMITIVES_EMITTED:
+	case PIPE_QUERY_PRIMITIVES_GENERATED:
+	case PIPE_QUERY_SO_STATISTICS:
+	case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
+	case PIPE_QUERY_PIPELINE_STATISTICS:
+		results = r600_buffer_map_sync_with_rings(ctx, buf, PIPE_TRANSFER_WRITE);
+		memset(results, 0, buf_size);
+		ctx->ws->buffer_unmap(buf->cs_buf);
+		break;
+	default:
+		assert(0);
+	}
+	return buf;
+}
+
+static void r600_update_occlusion_query_state(struct r600_common_context *rctx,
+					      unsigned type, int diff)
+{
+	if (type == PIPE_QUERY_OCCLUSION_COUNTER ||
+	    type == PIPE_QUERY_OCCLUSION_PREDICATE) {
+		bool old_enable = rctx->num_occlusion_queries != 0;
+		bool enable;
+
+		rctx->num_occlusion_queries += diff;
+		assert(rctx->num_occlusion_queries >= 0);
+
+		enable = rctx->num_occlusion_queries != 0;
+
+		if (enable != old_enable) {
+			rctx->set_occlusion_query_state(&rctx->b, enable);
+		}
+	}
+}
+
+static void r600_emit_query_begin(struct r600_common_context *ctx, struct r600_query *query)
+{
+	struct radeon_winsys_cs *cs = ctx->rings.gfx.cs;
+	uint64_t va;
+
+	r600_update_occlusion_query_state(ctx, query->type, 1);
+	ctx->need_gfx_cs_space(&ctx->b, query->num_cs_dw * 2, TRUE);
+
+	/* Get a new query buffer if needed.
*/ + if (query->buffer.results_end + query->result_size > query->buffer.buf->b.b.width0) { + struct r600_query_buffer *qbuf = MALLOC_STRUCT(r600_query_buffer); + *qbuf = query->buffer; + query->buffer.buf = r600_new_query_buffer(ctx, query->type); + query->buffer.results_end = 0; + query->buffer.previous = qbuf; + } + + /* emit begin query */ + va = r600_resource_va(ctx->b.screen, (void*)query->buffer.buf); + va += query->buffer.results_end; + + switch (query->type) { + case PIPE_QUERY_OCCLUSION_COUNTER: + case PIPE_QUERY_OCCLUSION_PREDICATE: + cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE, 2, 0); + cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1); + cs->buf[cs->cdw++] = va; + cs->buf[cs->cdw++] = (va >> 32UL) & 0xFF; + break; + case PIPE_QUERY_PRIMITIVES_EMITTED: + case PIPE_QUERY_PRIMITIVES_GENERATED: + case PIPE_QUERY_SO_STATISTICS: + case PIPE_QUERY_SO_OVERFLOW_PREDICATE: + cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE, 2, 0); + cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_SAMPLE_STREAMOUTSTATS) | EVENT_INDEX(3); + cs->buf[cs->cdw++] = va; + cs->buf[cs->cdw++] = (va >> 32UL) & 0xFF; + break; + case PIPE_QUERY_TIME_ELAPSED: + cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE_EOP, 4, 0); + cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_CACHE_FLUSH_AND_INV_TS_EVENT) | EVENT_INDEX(5); + cs->buf[cs->cdw++] = va; + cs->buf[cs->cdw++] = (3 << 29) | ((va >> 32UL) & 0xFF); + cs->buf[cs->cdw++] = 0; + cs->buf[cs->cdw++] = 0; + break; + case PIPE_QUERY_PIPELINE_STATISTICS: + if (!ctx->num_pipelinestat_queries) { + cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE, 0, 0); + cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_PIPELINESTAT_START) | EVENT_INDEX(0); + } + ctx->num_pipelinestat_queries++; + cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE, 2, 0); + cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_SAMPLE_PIPELINESTAT) | EVENT_INDEX(2); + cs->buf[cs->cdw++] = va; + cs->buf[cs->cdw++] = (va >> 32UL) & 0xFF; + break; + default: + assert(0); + } + cs->buf[cs->cdw++] = PKT3(PKT3_NOP, 0, 0); + cs->buf[cs->cdw++] = r600_context_bo_reloc(ctx, &ctx->rings.gfx, query->buffer.buf, RADEON_USAGE_WRITE); + + if (!r600_is_timer_query(query->type)) { + ctx->num_cs_dw_nontimer_queries_suspend += query->num_cs_dw; + } +} + +static void r600_emit_query_end(struct r600_common_context *ctx, struct r600_query *query) +{ + struct radeon_winsys_cs *cs = ctx->rings.gfx.cs; + uint64_t va; + + /* The queries which need begin already called this in begin_query. 
*/ + if (!r600_query_needs_begin(query->type)) { + ctx->need_gfx_cs_space(&ctx->b, query->num_cs_dw, FALSE); + } + + va = r600_resource_va(ctx->b.screen, (void*)query->buffer.buf); + /* emit end query */ + switch (query->type) { + case PIPE_QUERY_OCCLUSION_COUNTER: + case PIPE_QUERY_OCCLUSION_PREDICATE: + va += query->buffer.results_end + 8; + cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE, 2, 0); + cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1); + cs->buf[cs->cdw++] = va; + cs->buf[cs->cdw++] = (va >> 32UL) & 0xFF; + break; + case PIPE_QUERY_PRIMITIVES_EMITTED: + case PIPE_QUERY_PRIMITIVES_GENERATED: + case PIPE_QUERY_SO_STATISTICS: + case PIPE_QUERY_SO_OVERFLOW_PREDICATE: + va += query->buffer.results_end + query->result_size/2; + cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE, 2, 0); + cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_SAMPLE_STREAMOUTSTATS) | EVENT_INDEX(3); + cs->buf[cs->cdw++] = va; + cs->buf[cs->cdw++] = (va >> 32UL) & 0xFF; + break; + case PIPE_QUERY_TIME_ELAPSED: + va += query->buffer.results_end + query->result_size/2; + /* fall through */ + case PIPE_QUERY_TIMESTAMP: + cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE_EOP, 4, 0); + cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_CACHE_FLUSH_AND_INV_TS_EVENT) | EVENT_INDEX(5); + cs->buf[cs->cdw++] = va; + cs->buf[cs->cdw++] = (3 << 29) | ((va >> 32UL) & 0xFF); + cs->buf[cs->cdw++] = 0; + cs->buf[cs->cdw++] = 0; + break; + case PIPE_QUERY_PIPELINE_STATISTICS: + assert(ctx->num_pipelinestat_queries > 0); + ctx->num_pipelinestat_queries--; + if (!ctx->num_pipelinestat_queries) { + cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE, 0, 0); + cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_PIPELINESTAT_STOP) | EVENT_INDEX(0); + } + va += query->buffer.results_end + query->result_size/2; + cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE, 2, 0); + cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_SAMPLE_PIPELINESTAT) | EVENT_INDEX(2); + cs->buf[cs->cdw++] = va; + cs->buf[cs->cdw++] = (va >> 32UL) & 0xFF; + break; + default: + assert(0); + } + cs->buf[cs->cdw++] = PKT3(PKT3_NOP, 0, 0); + cs->buf[cs->cdw++] = r600_context_bo_reloc(ctx, &ctx->rings.gfx, query->buffer.buf, RADEON_USAGE_WRITE); + + query->buffer.results_end += query->result_size; + + if (r600_query_needs_begin(query->type)) { + if (!r600_is_timer_query(query->type)) { + ctx->num_cs_dw_nontimer_queries_suspend -= query->num_cs_dw; + } + } + + r600_update_occlusion_query_state(ctx, query->type, -1); +} + +static void r600_emit_query_predication(struct r600_common_context *ctx, struct r600_query *query, + int operation, bool flag_wait) +{ + struct radeon_winsys_cs *cs = ctx->rings.gfx.cs; + + if (operation == PREDICATION_OP_CLEAR) { + ctx->need_gfx_cs_space(&ctx->b, 3, FALSE); + + cs->buf[cs->cdw++] = PKT3(PKT3_SET_PREDICATION, 1, 0); + cs->buf[cs->cdw++] = 0; + cs->buf[cs->cdw++] = PRED_OP(PREDICATION_OP_CLEAR); + } else { + struct r600_query_buffer *qbuf; + unsigned count; + uint32_t op; + + /* Find how many results there are. */ + count = 0; + for (qbuf = &query->buffer; qbuf; qbuf = qbuf->previous) { + count += qbuf->results_end / query->result_size; + } + + ctx->need_gfx_cs_space(&ctx->b, 5 * count, TRUE); + + op = PRED_OP(operation) | PREDICATION_DRAW_VISIBLE | + (flag_wait ? 
PREDICATION_HINT_WAIT : PREDICATION_HINT_NOWAIT_DRAW); + + /* emit predicate packets for all data blocks */ + for (qbuf = &query->buffer; qbuf; qbuf = qbuf->previous) { + unsigned results_base = 0; + uint64_t va = r600_resource_va(ctx->b.screen, &qbuf->buf->b.b); + + while (results_base < qbuf->results_end) { + cs->buf[cs->cdw++] = PKT3(PKT3_SET_PREDICATION, 1, 0); + cs->buf[cs->cdw++] = (va + results_base) & 0xFFFFFFFFUL; + cs->buf[cs->cdw++] = op | (((va + results_base) >> 32UL) & 0xFF); + cs->buf[cs->cdw++] = PKT3(PKT3_NOP, 0, 0); + cs->buf[cs->cdw++] = r600_context_bo_reloc(ctx, &ctx->rings.gfx, qbuf->buf, RADEON_USAGE_READ); + results_base += query->result_size; + + /* set CONTINUE bit for all packets except the first */ + op |= PREDICATION_CONTINUE; + } + } while (qbuf); + } +} + +static struct pipe_query *r600_create_query(struct pipe_context *ctx, unsigned query_type) +{ + struct r600_common_context *rctx = (struct r600_common_context *)ctx; + struct r600_query *query; + bool skip_allocation = false; + + query = CALLOC_STRUCT(r600_query); + if (query == NULL) + return NULL; + + query->type = query_type; + + switch (query_type) { + case PIPE_QUERY_OCCLUSION_COUNTER: + case PIPE_QUERY_OCCLUSION_PREDICATE: + query->result_size = 16 * rctx->max_db; + query->num_cs_dw = 6; + break; + case PIPE_QUERY_TIME_ELAPSED: + query->result_size = 16; + query->num_cs_dw = 8; + break; + case PIPE_QUERY_TIMESTAMP: + query->result_size = 8; + query->num_cs_dw = 8; + break; + case PIPE_QUERY_PRIMITIVES_EMITTED: + case PIPE_QUERY_PRIMITIVES_GENERATED: + case PIPE_QUERY_SO_STATISTICS: + case PIPE_QUERY_SO_OVERFLOW_PREDICATE: + /* NumPrimitivesWritten, PrimitiveStorageNeeded. */ + query->result_size = 32; + query->num_cs_dw = 6; + break; + case PIPE_QUERY_PIPELINE_STATISTICS: + /* 11 values on EG, 8 on R600. */ + query->result_size = (rctx->chip_class >= EVERGREEN ? 11 : 8) * 16; + query->num_cs_dw = 8; + break; + /* Non-GPU queries. */ + case R600_QUERY_DRAW_CALLS: + case R600_QUERY_REQUESTED_VRAM: + case R600_QUERY_REQUESTED_GTT: + case R600_QUERY_BUFFER_WAIT_TIME: + skip_allocation = true; + break; + default: + assert(0); + FREE(query); + return NULL; + } + + if (!skip_allocation) { + query->buffer.buf = r600_new_query_buffer(rctx, query_type); + if (!query->buffer.buf) { + FREE(query); + return NULL; + } + } + return (struct pipe_query*)query; +} + +static void r600_destroy_query(struct pipe_context *ctx, struct pipe_query *query) +{ + struct r600_query *rquery = (struct r600_query*)query; + struct r600_query_buffer *prev = rquery->buffer.previous; + + /* Release all query buffers. */ + while (prev) { + struct r600_query_buffer *qbuf = prev; + prev = prev->previous; + pipe_resource_reference((struct pipe_resource**)&qbuf->buf, NULL); + FREE(qbuf); + } + + pipe_resource_reference((struct pipe_resource**)&rquery->buffer.buf, NULL); + FREE(query); +} + +static void r600_begin_query(struct pipe_context *ctx, struct pipe_query *query) +{ + struct r600_common_context *rctx = (struct r600_common_context *)ctx; + struct r600_query *rquery = (struct r600_query *)query; + struct r600_query_buffer *prev = rquery->buffer.previous; + + if (!r600_query_needs_begin(rquery->type)) { + assert(0); + return; + } + + /* Non-GPU queries. 
*/ + switch (rquery->type) { + case R600_QUERY_DRAW_CALLS: + rquery->begin_result = rctx->num_draw_calls; + return; + case R600_QUERY_REQUESTED_VRAM: + case R600_QUERY_REQUESTED_GTT: + rquery->begin_result = 0; + return; + case R600_QUERY_BUFFER_WAIT_TIME: + rquery->begin_result = rctx->ws->query_value(rctx->ws, RADEON_BUFFER_WAIT_TIME_NS); + return; + } + + /* Discard the old query buffers. */ + while (prev) { + struct r600_query_buffer *qbuf = prev; + prev = prev->previous; + pipe_resource_reference((struct pipe_resource**)&qbuf->buf, NULL); + FREE(qbuf); + } + + /* Obtain a new buffer if the current one can't be mapped without a stall. */ + if (r600_rings_is_buffer_referenced(rctx, rquery->buffer.buf->cs_buf, RADEON_USAGE_READWRITE) || + rctx->ws->buffer_is_busy(rquery->buffer.buf->buf, RADEON_USAGE_READWRITE)) { + pipe_resource_reference((struct pipe_resource**)&rquery->buffer.buf, NULL); + rquery->buffer.buf = r600_new_query_buffer(rctx, rquery->type); + } + + rquery->buffer.results_end = 0; + rquery->buffer.previous = NULL; + + r600_emit_query_begin(rctx, rquery); + + if (!r600_is_timer_query(rquery->type)) { + LIST_ADDTAIL(&rquery->list, &rctx->active_nontimer_queries); + } +} + +static void r600_end_query(struct pipe_context *ctx, struct pipe_query *query) +{ + struct r600_common_context *rctx = (struct r600_common_context *)ctx; + struct r600_query *rquery = (struct r600_query *)query; + + /* Non-GPU queries. */ + switch (rquery->type) { + case R600_QUERY_DRAW_CALLS: + rquery->end_result = rctx->num_draw_calls; + return; + case R600_QUERY_REQUESTED_VRAM: + rquery->end_result = rctx->ws->query_value(rctx->ws, RADEON_REQUESTED_VRAM_MEMORY); + return; + case R600_QUERY_REQUESTED_GTT: + rquery->end_result = rctx->ws->query_value(rctx->ws, RADEON_REQUESTED_GTT_MEMORY); + return; + case R600_QUERY_BUFFER_WAIT_TIME: + rquery->end_result = rctx->ws->query_value(rctx->ws, RADEON_BUFFER_WAIT_TIME_NS); + return; + } + + r600_emit_query_end(rctx, rquery); + + if (r600_query_needs_begin(rquery->type) && !r600_is_timer_query(rquery->type)) { + LIST_DELINIT(&rquery->list); + } +} + +static unsigned r600_query_read_result(char *map, unsigned start_index, unsigned end_index, + bool test_status_bit) +{ + uint32_t *current_result = (uint32_t*)map; + uint64_t start, end; + + start = (uint64_t)current_result[start_index] | + (uint64_t)current_result[start_index+1] << 32; + end = (uint64_t)current_result[end_index] | + (uint64_t)current_result[end_index+1] << 32; + + if (!test_status_bit || + ((start & 0x8000000000000000UL) && (end & 0x8000000000000000UL))) { + return end - start; + } + return 0; +} + +static boolean r600_get_query_buffer_result(struct r600_common_context *ctx, + struct r600_query *query, + struct r600_query_buffer *qbuf, + boolean wait, + union pipe_query_result *result) +{ + unsigned results_base = 0; + char *map; + + /* Non-GPU queries. */ + switch (query->type) { + case R600_QUERY_DRAW_CALLS: + case R600_QUERY_REQUESTED_VRAM: + case R600_QUERY_REQUESTED_GTT: + case R600_QUERY_BUFFER_WAIT_TIME: + result->u64 = query->end_result - query->begin_result; + return TRUE; + } + + map = r600_buffer_map_sync_with_rings(ctx, qbuf->buf, + PIPE_TRANSFER_READ | + (wait ? 
0 : PIPE_TRANSFER_DONTBLOCK)); + if (!map) + return FALSE; + + /* count all results across all data blocks */ + switch (query->type) { + case PIPE_QUERY_OCCLUSION_COUNTER: + while (results_base != qbuf->results_end) { + result->u64 += + r600_query_read_result(map + results_base, 0, 2, true); + results_base += 16; + } + break; + case PIPE_QUERY_OCCLUSION_PREDICATE: + while (results_base != qbuf->results_end) { + result->b = result->b || + r600_query_read_result(map + results_base, 0, 2, true) != 0; + results_base += 16; + } + break; + case PIPE_QUERY_TIME_ELAPSED: + while (results_base != qbuf->results_end) { + result->u64 += + r600_query_read_result(map + results_base, 0, 2, false); + results_base += query->result_size; + } + break; + case PIPE_QUERY_TIMESTAMP: + { + uint32_t *current_result = (uint32_t*)map; + result->u64 = (uint64_t)current_result[0] | + (uint64_t)current_result[1] << 32; + break; + } + case PIPE_QUERY_PRIMITIVES_EMITTED: + /* SAMPLE_STREAMOUTSTATS stores this structure: + * { + * u64 NumPrimitivesWritten; + * u64 PrimitiveStorageNeeded; + * } + * We only need NumPrimitivesWritten here. */ + while (results_base != qbuf->results_end) { + result->u64 += + r600_query_read_result(map + results_base, 2, 6, true); + results_base += query->result_size; + } + break; + case PIPE_QUERY_PRIMITIVES_GENERATED: + /* Here we read PrimitiveStorageNeeded. */ + while (results_base != qbuf->results_end) { + result->u64 += + r600_query_read_result(map + results_base, 0, 4, true); + results_base += query->result_size; + } + break; + case PIPE_QUERY_SO_STATISTICS: + while (results_base != qbuf->results_end) { + result->so_statistics.num_primitives_written += + r600_query_read_result(map + results_base, 2, 6, true); + result->so_statistics.primitives_storage_needed += + r600_query_read_result(map + results_base, 0, 4, true); + results_base += query->result_size; + } + break; + case PIPE_QUERY_SO_OVERFLOW_PREDICATE: + while (results_base != qbuf->results_end) { + result->b = result->b || + r600_query_read_result(map + results_base, 2, 6, true) != + r600_query_read_result(map + results_base, 0, 4, true); + results_base += query->result_size; + } + break; + case PIPE_QUERY_PIPELINE_STATISTICS: + if (ctx->chip_class >= EVERGREEN) { + while (results_base != qbuf->results_end) { + result->pipeline_statistics.ps_invocations += + r600_query_read_result(map + results_base, 0, 22, false); + result->pipeline_statistics.c_primitives += + r600_query_read_result(map + results_base, 2, 24, false); + result->pipeline_statistics.c_invocations += + r600_query_read_result(map + results_base, 4, 26, false); + result->pipeline_statistics.vs_invocations += + r600_query_read_result(map + results_base, 6, 28, false); + result->pipeline_statistics.gs_invocations += + r600_query_read_result(map + results_base, 8, 30, false); + result->pipeline_statistics.gs_primitives += + r600_query_read_result(map + results_base, 10, 32, false); + result->pipeline_statistics.ia_primitives += + r600_query_read_result(map + results_base, 12, 34, false); + result->pipeline_statistics.ia_vertices += + r600_query_read_result(map + results_base, 14, 36, false); + result->pipeline_statistics.hs_invocations += + r600_query_read_result(map + results_base, 16, 38, false); + result->pipeline_statistics.ds_invocations += + r600_query_read_result(map + results_base, 18, 40, false); + result->pipeline_statistics.cs_invocations += + r600_query_read_result(map + results_base, 20, 42, false); + results_base += query->result_size; + } + } else { + 
while (results_base != qbuf->results_end) { + result->pipeline_statistics.ps_invocations += + r600_query_read_result(map + results_base, 0, 16, false); + result->pipeline_statistics.c_primitives += + r600_query_read_result(map + results_base, 2, 18, false); + result->pipeline_statistics.c_invocations += + r600_query_read_result(map + results_base, 4, 20, false); + result->pipeline_statistics.vs_invocations += + r600_query_read_result(map + results_base, 6, 22, false); + result->pipeline_statistics.gs_invocations += + r600_query_read_result(map + results_base, 8, 24, false); + result->pipeline_statistics.gs_primitives += + r600_query_read_result(map + results_base, 10, 26, false); + result->pipeline_statistics.ia_primitives += + r600_query_read_result(map + results_base, 12, 28, false); + result->pipeline_statistics.ia_vertices += + r600_query_read_result(map + results_base, 14, 30, false); + results_base += query->result_size; + } + } +#if 0 /* for testing */ + printf("Pipeline stats: IA verts=%llu, IA prims=%llu, VS=%llu, HS=%llu, " + "DS=%llu, GS=%llu, GS prims=%llu, Clipper=%llu, " + "Clipper prims=%llu, PS=%llu, CS=%llu\n", + result->pipeline_statistics.ia_vertices, + result->pipeline_statistics.ia_primitives, + result->pipeline_statistics.vs_invocations, + result->pipeline_statistics.hs_invocations, + result->pipeline_statistics.ds_invocations, + result->pipeline_statistics.gs_invocations, + result->pipeline_statistics.gs_primitives, + result->pipeline_statistics.c_invocations, + result->pipeline_statistics.c_primitives, + result->pipeline_statistics.ps_invocations, + result->pipeline_statistics.cs_invocations); +#endif + break; + default: + assert(0); + } + + ctx->ws->buffer_unmap(qbuf->buf->cs_buf); + return TRUE; +} + +static boolean r600_get_query_result(struct pipe_context *ctx, + struct pipe_query *query, + boolean wait, union pipe_query_result *result) +{ + struct r600_common_context *rctx = (struct r600_common_context *)ctx; + struct r600_query *rquery = (struct r600_query *)query; + struct r600_query_buffer *qbuf; + + util_query_clear_result(result, rquery->type); + + for (qbuf = &rquery->buffer; qbuf; qbuf = qbuf->previous) { + if (!r600_get_query_buffer_result(rctx, rquery, qbuf, wait, result)) { + return FALSE; + } + } + + /* Convert the time to expected units. 
*/
+ if (rquery->type == PIPE_QUERY_TIME_ELAPSED ||
+ rquery->type == PIPE_QUERY_TIMESTAMP) {
+ result->u64 = (1000000 * result->u64) / rctx->screen->info.r600_clock_crystal_freq;
+ }
+ return TRUE;
+}
+
+static void r600_render_condition(struct pipe_context *ctx,
+ struct pipe_query *query,
+ boolean condition,
+ uint mode)
+{
+ struct r600_common_context *rctx = (struct r600_common_context *)ctx;
+ struct r600_query *rquery = (struct r600_query *)query;
+ bool wait_flag = false;
+
+ rctx->current_render_cond = query;
+ rctx->current_render_cond_cond = condition;
+ rctx->current_render_cond_mode = mode;
+
+ if (query == NULL) {
+ if (rctx->predicate_drawing) {
+ rctx->predicate_drawing = false;
+ r600_emit_query_predication(rctx, NULL, PREDICATION_OP_CLEAR, false);
+ }
+ return;
+ }
+
+ if (mode == PIPE_RENDER_COND_WAIT ||
+ mode == PIPE_RENDER_COND_BY_REGION_WAIT) {
+ wait_flag = true;
+ }
+
+ rctx->predicate_drawing = true;
+
+ switch (rquery->type) {
+ case PIPE_QUERY_OCCLUSION_COUNTER:
+ case PIPE_QUERY_OCCLUSION_PREDICATE:
+ r600_emit_query_predication(rctx, rquery, PREDICATION_OP_ZPASS, wait_flag);
+ break;
+ case PIPE_QUERY_PRIMITIVES_EMITTED:
+ case PIPE_QUERY_PRIMITIVES_GENERATED:
+ case PIPE_QUERY_SO_STATISTICS:
+ case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
+ r600_emit_query_predication(rctx, rquery, PREDICATION_OP_PRIMCOUNT, wait_flag);
+ break;
+ default:
+ assert(0);
+ }
+}
+
+void r600_suspend_nontimer_queries(struct r600_common_context *ctx)
+{
+ struct r600_query *query;
+
+ LIST_FOR_EACH_ENTRY(query, &ctx->active_nontimer_queries, list) {
+ r600_emit_query_end(ctx, query);
+ }
+ assert(ctx->num_cs_dw_nontimer_queries_suspend == 0);
+}
+
+void r600_resume_nontimer_queries(struct r600_common_context *ctx)
+{
+ struct r600_query *query;
+
+ assert(ctx->num_cs_dw_nontimer_queries_suspend == 0);
+
+ LIST_FOR_EACH_ENTRY(query, &ctx->active_nontimer_queries, list) {
+ r600_emit_query_begin(ctx, query);
+ }
+}
+
+/* Get backends mask */
+void r600_query_init_backend_mask(struct r600_common_context *ctx)
+{
+ struct radeon_winsys_cs *cs = ctx->rings.gfx.cs;
+ struct r600_resource *buffer;
+ uint32_t *results;
+ unsigned num_backends = ctx->screen->info.r600_num_backends;
+ unsigned i, mask = 0;
+ uint64_t va;
+
+ /* if backend_map query is supported by the kernel */
+ if (ctx->screen->info.r600_backend_map_valid) {
+ unsigned num_tile_pipes = ctx->screen->info.r600_num_tile_pipes;
+ unsigned backend_map = ctx->screen->info.r600_backend_map;
+ unsigned item_width, item_mask;
+
+ if (ctx->chip_class >= EVERGREEN) {
+ item_width = 4;
+ item_mask = 0x7;
+ } else {
+ item_width = 2;
+ item_mask = 0x3;
+ }
+
+ while(num_tile_pipes--) {
+ i = backend_map & item_mask;
+ mask |= (1<<i);
+ backend_map >>= item_width;
+ }
+ if (mask != 0) {
+ ctx->backend_mask = mask;
+ return;
+ }
+ }
+
+ /* otherwise backup path for older kernels */
+
+ /* create buffer for event data */
+ buffer = (struct r600_resource*)
+ pipe_buffer_create(ctx->b.screen, PIPE_BIND_CUSTOM,
+ PIPE_USAGE_STAGING, ctx->max_db*16);
+ if (!buffer)
+ goto err;
+ va = r600_resource_va(ctx->b.screen, (void*)buffer);
+
+ /* initialize buffer with zeroes */
+ results = r600_buffer_map_sync_with_rings(ctx, buffer, PIPE_TRANSFER_WRITE);
+ if (results) {
+ memset(results, 0, ctx->max_db * 4 * 4);
+ ctx->ws->buffer_unmap(buffer->cs_buf);
+
+ /* emit EVENT_WRITE for ZPASS_DONE */
+ cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE, 2, 0);
+ cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1);
+ cs->buf[cs->cdw++] = va;
+ cs->buf[cs->cdw++] = (va 
>> 32UL) & 0xFF;
+
+ cs->buf[cs->cdw++] = PKT3(PKT3_NOP, 0, 0);
+ cs->buf[cs->cdw++] = r600_context_bo_reloc(ctx, &ctx->rings.gfx, buffer, RADEON_USAGE_WRITE);
+
+ /* analyze results */
+ results = r600_buffer_map_sync_with_rings(ctx, buffer, PIPE_TRANSFER_READ);
+ if (results) {
+ for(i = 0; i < ctx->max_db; i++) {
+ /* at least highest bit will be set if backend is used */
+ if (results[i*4 + 1])
+ mask |= (1<<i);
+ }
+ ctx->ws->buffer_unmap(buffer->cs_buf);
+ }
+ }
+
+ pipe_resource_reference((struct pipe_resource**)&buffer, NULL);
+
+ if (mask != 0) {
+ ctx->backend_mask = mask;
+ return;
+ }
+
+err:
+ /* fallback to old method - set num_backends lower bits to 1 */
+ ctx->backend_mask = (~((uint32_t)0))>>(32-num_backends);
+ return;
+}
+
+void r600_query_init(struct r600_common_context *rctx)
+{
+ rctx->b.create_query = r600_create_query;
+ rctx->b.destroy_query = r600_destroy_query;
+ rctx->b.begin_query = r600_begin_query;
+ rctx->b.end_query = r600_end_query;
+ rctx->b.get_query_result = r600_get_query_result;
+
+ if (((struct r600_common_screen*)rctx->b.screen)->info.r600_num_backends > 0)
+ rctx->b.render_condition = r600_render_condition;
+
+ LIST_INITHEAD(&rctx->active_nontimer_queries);
+}
diff --git a/src/gallium/drivers/radeon/r600d_common.h b/src/gallium/drivers/radeon/r600d_common.h
index 99146941570..357d26f4eb8 100644
--- a/src/gallium/drivers/radeon/r600d_common.h
+++ b/src/gallium/drivers/radeon/r600d_common.h
@@ -40,6 +40,7 @@
 #define RADEON_CP_PACKET3_COMPUTE_MODE 0x00000002
 
 #define PKT3_NOP 0x10
+#define PKT3_SET_PREDICATION 0x20
 #define PKT3_STRMOUT_BUFFER_UPDATE 0x34
 #define STRMOUT_STORE_BUFFER_FILLED_SIZE 1
 #define STRMOUT_OFFSET_SOURCE(x) (((x) & 0x3) << 1)
@@ -51,6 +52,7 @@
 #define PKT3_WAIT_REG_MEM 0x3C
 #define WAIT_REG_MEM_EQUAL 3
 #define PKT3_EVENT_WRITE 0x46
+#define PKT3_EVENT_WRITE_EOP 0x47
 #define PKT3_SET_CONFIG_REG 0x68
 #define PKT3_SET_CONTEXT_REG 0x69
 #define PKT3_STRMOUT_BASE_UPDATE 0x72 /* r700 only */
@@ -82,6 +84,16 @@
 * 5 - TS events
 */
 
+#define PREDICATION_OP_CLEAR 0x0
+#define PREDICATION_OP_ZPASS 0x1
+#define PREDICATION_OP_PRIMCOUNT 0x2
+#define PRED_OP(x) ((x) << 16)
+#define PREDICATION_CONTINUE (1 << 31)
+#define PREDICATION_HINT_WAIT (0 << 12)
+#define PREDICATION_HINT_NOWAIT_DRAW (1 << 12)
+#define PREDICATION_DRAW_NOT_VISIBLE (0 << 8)
+#define PREDICATION_DRAW_VISIBLE (1 << 8)
+
 /* R600-R700*/
 #define R_008490_CP_STRMOUT_CNTL 0x008490
 #define S_008490_OFFSET_UPDATE_DONE(x) (((x) & 0x1) << 0)
diff --git a/src/gallium/drivers/radeonsi/si_pipe.c b/src/gallium/drivers/radeonsi/si_pipe.c
index d76f3835084..662c5211f36 100644
--- a/src/gallium/drivers/radeonsi/si_pipe.c
+++ b/src/gallium/drivers/radeonsi/si_pipe.c
@@ -132,16 +132,14 @@ static struct pipe_context *si_create_context(struct pipe_screen *screen, void *priv)
 if (sctx == NULL)
 return NULL;
 
- if (!r600_common_context_init(&sctx->b, &sscreen->b))
- goto fail;
-
- sctx->b.b.screen = screen;
+ sctx->b.b.screen = screen; /* this must be set first */
 sctx->b.b.priv = priv;
 sctx->b.b.destroy = si_destroy_context;
 sctx->b.b.flush = si_flush_from_st;
+ sctx->screen = sscreen; /* Easy accessing of screen/winsys. */
 
- /* Easy accessing of screen/winsys. */
- sctx->screen = sscreen;
+ if (!r600_common_context_init(&sctx->b, &sscreen->b))
+ goto fail;
 
 si_init_blit_functions(sctx);
 si_init_query_functions(sctx);
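
For context, not part of the patch: a minimal sketch of how a Gallium state
tracker reaches the hooks that r600_query_init() installs above. The helper
name count_samples and the "pipe" variable are hypothetical; the
create_query/begin_query/end_query/get_query_result entry points are the
standard struct pipe_context interface that this file now implements for both
r600g and radeonsi.

#include "pipe/p_compiler.h"
#include "pipe/p_context.h"
#include "pipe/p_defines.h"

/* Hypothetical helper: count the samples that pass the depth test for
 * whatever is drawn between begin_query and end_query. */
static uint64_t count_samples(struct pipe_context *pipe)
{
	struct pipe_query *q;
	union pipe_query_result result;

	/* r600_create_query() sizes and allocates the result buffer
	 * (16 * max_db bytes per result for occlusion queries). */
	q = pipe->create_query(pipe, PIPE_QUERY_OCCLUSION_COUNTER);

	/* r600_emit_query_begin() writes the "begin" ZPASS_DONE event. */
	pipe->begin_query(pipe, q);

	/* ... draw calls go here ... */

	/* r600_emit_query_end() writes the matching "end" event. */
	pipe->end_query(pipe, q);

	/* wait=TRUE: r600_get_query_result() maps every buffer in the
	 * chain and sums end - start across all data blocks. */
	pipe->get_query_result(pipe, q, TRUE, &result);

	pipe->destroy_query(pipe, q);
	return result.u64;
}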