#include "r600_query.h"
#include "r600_cs.h"
#include "util/u_memory.h"
+#include "util/u_upload_mgr.h"
-struct r600_query_buffer {
- /* The buffer where query results are stored. */
- struct r600_resource *buf;
- /* Offset of the next free result after current query data */
- unsigned results_end;
- /* If a query buffer is full, a new buffer is created and the old one
- * is put in here. When we calculate the result, we sum up the samples
- * from all buffers. */
- struct r600_query_buffer *previous;
-};
+#include "tgsi/tgsi_text.h"
-struct r600_query {
- struct r600_query_ops *ops;
-
- /* The query buffer and how many results are in it. */
- struct r600_query_buffer buffer;
- /* The type of query */
- unsigned type;
- /* Size of the result in memory for both begin_query and end_query,
- * this can be one or two numbers, or it could even be a size of a structure. */
- unsigned result_size;
- /* The number of dwords for begin_query or end_query. */
- unsigned num_cs_dw;
- /* linked list of queries */
- struct list_head list;
- /* For transform feedback: which stream the query is for */
- unsigned stream;
+struct r600_hw_query_params {
+ unsigned start_offset;
+ unsigned end_offset;
+ unsigned fence_offset;
+ unsigned pair_stride;
+ unsigned pair_count;
};
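+
+/* Illustrative example (values follow r600_get_hw_query_params() below):
+ * an occlusion query on a chip with max_db = 8 uses start_offset = 0,
+ * end_offset = 8, pair_stride = 16 and pair_count = 8 (one begin/end
+ * sample pair per DB), with fence_offset = 8 * 16 = 128. */
+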
/* Queries without buffer handling or suspend/resume. */
switch (type) {
case R600_QUERY_REQUESTED_VRAM: return RADEON_REQUESTED_VRAM_MEMORY;
case R600_QUERY_REQUESTED_GTT: return RADEON_REQUESTED_GTT_MEMORY;
+ case R600_QUERY_MAPPED_VRAM: return RADEON_MAPPED_VRAM;
+ case R600_QUERY_MAPPED_GTT: return RADEON_MAPPED_GTT;
case R600_QUERY_BUFFER_WAIT_TIME: return RADEON_BUFFER_WAIT_TIME_NS;
- case R600_QUERY_NUM_CS_FLUSHES: return RADEON_NUM_CS_FLUSHES;
+ case R600_QUERY_NUM_CTX_FLUSHES: return RADEON_NUM_CS_FLUSHES;
case R600_QUERY_NUM_BYTES_MOVED: return RADEON_NUM_BYTES_MOVED;
+ case R600_QUERY_NUM_EVICTIONS: return RADEON_NUM_EVICTIONS;
case R600_QUERY_VRAM_USAGE: return RADEON_VRAM_USAGE;
case R600_QUERY_GTT_USAGE: return RADEON_GTT_USAGE;
case R600_QUERY_GPU_TEMPERATURE: return RADEON_GPU_TEMPERATURE;
}
}
-static boolean r600_query_sw_begin(struct r600_common_context *rctx,
- struct r600_query *rquery)
+static bool r600_query_sw_begin(struct r600_common_context *rctx,
+ struct r600_query *rquery)
{
struct r600_query_sw *query = (struct r600_query_sw *)rquery;
case R600_QUERY_DRAW_CALLS:
query->begin_result = rctx->num_draw_calls;
break;
+ case R600_QUERY_SPILL_DRAW_CALLS:
+ query->begin_result = rctx->num_spill_draw_calls;
+ break;
+ case R600_QUERY_COMPUTE_CALLS:
+ query->begin_result = rctx->num_compute_calls;
+ break;
+ case R600_QUERY_SPILL_COMPUTE_CALLS:
+ query->begin_result = rctx->num_spill_compute_calls;
+ break;
+ case R600_QUERY_DMA_CALLS:
+ query->begin_result = rctx->num_dma_calls;
+ break;
+ case R600_QUERY_NUM_VS_FLUSHES:
+ query->begin_result = rctx->num_vs_flushes;
+ break;
+ case R600_QUERY_NUM_PS_FLUSHES:
+ query->begin_result = rctx->num_ps_flushes;
+ break;
+ case R600_QUERY_NUM_CS_FLUSHES:
+ query->begin_result = rctx->num_cs_flushes;
+ break;
case R600_QUERY_REQUESTED_VRAM:
case R600_QUERY_REQUESTED_GTT:
+ case R600_QUERY_MAPPED_VRAM:
+ case R600_QUERY_MAPPED_GTT:
case R600_QUERY_VRAM_USAGE:
case R600_QUERY_GTT_USAGE:
case R600_QUERY_GPU_TEMPERATURE:
case R600_QUERY_CURRENT_GPU_SCLK:
case R600_QUERY_CURRENT_GPU_MCLK:
+ case R600_QUERY_BACK_BUFFER_PS_DRAW_RATIO:
query->begin_result = 0;
break;
case R600_QUERY_BUFFER_WAIT_TIME:
- case R600_QUERY_NUM_CS_FLUSHES:
- case R600_QUERY_NUM_BYTES_MOVED: {
+ case R600_QUERY_NUM_CTX_FLUSHES:
+ case R600_QUERY_NUM_BYTES_MOVED:
+ case R600_QUERY_NUM_EVICTIONS: {
enum radeon_value_id ws_id = winsys_id_from_type(query->b.type);
query->begin_result = rctx->ws->query_value(rctx->ws, ws_id);
break;
case R600_QUERY_NUM_SHADERS_CREATED:
query->begin_result = p_atomic_read(&rctx->screen->num_shaders_created);
break;
+ case R600_QUERY_GPIN_ASIC_ID:
+ case R600_QUERY_GPIN_NUM_SIMD:
+ case R600_QUERY_GPIN_NUM_RB:
+ case R600_QUERY_GPIN_NUM_SPI:
+ case R600_QUERY_GPIN_NUM_SE:
+ break;
default:
unreachable("r600_query_sw_begin: bad query type");
}
- return TRUE;
+ return true;
}
-static void r600_query_sw_end(struct r600_common_context *rctx,
+static bool r600_query_sw_end(struct r600_common_context *rctx,
struct r600_query *rquery)
{
struct r600_query_sw *query = (struct r600_query_sw *)rquery;
case PIPE_QUERY_TIMESTAMP_DISJOINT:
break;
case PIPE_QUERY_GPU_FINISHED:
- rctx->b.flush(&rctx->b, &query->fence, 0);
+ rctx->b.flush(&rctx->b, &query->fence, PIPE_FLUSH_DEFERRED);
break;
case R600_QUERY_DRAW_CALLS:
- query->begin_result = rctx->num_draw_calls;
+ query->end_result = rctx->num_draw_calls;
+ break;
+ case R600_QUERY_SPILL_DRAW_CALLS:
+ query->end_result = rctx->num_spill_draw_calls;
+ break;
+ case R600_QUERY_COMPUTE_CALLS:
+ query->end_result = rctx->num_compute_calls;
+ break;
+ case R600_QUERY_SPILL_COMPUTE_CALLS:
+ query->end_result = rctx->num_spill_compute_calls;
+ break;
+ case R600_QUERY_DMA_CALLS:
+ query->end_result = rctx->num_dma_calls;
+ break;
+ case R600_QUERY_NUM_VS_FLUSHES:
+ query->end_result = rctx->num_vs_flushes;
+ break;
+ case R600_QUERY_NUM_PS_FLUSHES:
+ query->end_result = rctx->num_ps_flushes;
+ break;
+ case R600_QUERY_NUM_CS_FLUSHES:
+ query->end_result = rctx->num_cs_flushes;
break;
case R600_QUERY_REQUESTED_VRAM:
case R600_QUERY_REQUESTED_GTT:
+ case R600_QUERY_MAPPED_VRAM:
+ case R600_QUERY_MAPPED_GTT:
case R600_QUERY_VRAM_USAGE:
case R600_QUERY_GTT_USAGE:
case R600_QUERY_GPU_TEMPERATURE:
case R600_QUERY_CURRENT_GPU_SCLK:
case R600_QUERY_CURRENT_GPU_MCLK:
case R600_QUERY_BUFFER_WAIT_TIME:
- case R600_QUERY_NUM_CS_FLUSHES:
- case R600_QUERY_NUM_BYTES_MOVED: {
+ case R600_QUERY_NUM_CTX_FLUSHES:
+ case R600_QUERY_NUM_BYTES_MOVED:
+ case R600_QUERY_NUM_EVICTIONS: {
enum radeon_value_id ws_id = winsys_id_from_type(query->b.type);
query->end_result = rctx->ws->query_value(rctx->ws, ws_id);
break;
query->begin_result = 0;
break;
case R600_QUERY_NUM_COMPILATIONS:
- query->begin_result = p_atomic_read(&rctx->screen->num_compilations);
+ query->end_result = p_atomic_read(&rctx->screen->num_compilations);
break;
case R600_QUERY_NUM_SHADERS_CREATED:
- query->begin_result = p_atomic_read(&rctx->screen->num_shaders_created);
+ query->end_result = p_atomic_read(&rctx->screen->num_shaders_created);
+ break;
+ case R600_QUERY_BACK_BUFFER_PS_DRAW_RATIO:
+ query->end_result = rctx->last_tex_ps_draw_ratio;
+ break;
+ case R600_QUERY_GPIN_ASIC_ID:
+ case R600_QUERY_GPIN_NUM_SIMD:
+ case R600_QUERY_GPIN_NUM_RB:
+ case R600_QUERY_GPIN_NUM_SPI:
+ case R600_QUERY_GPIN_NUM_SE:
break;
default:
unreachable("r600_query_sw_end: bad query type");
}
+
+ return true;
}
-static boolean r600_query_sw_get_result(struct r600_common_context *rctx,
- struct r600_query *rquery,
- boolean wait,
- union pipe_query_result *result)
+static bool r600_query_sw_get_result(struct r600_common_context *rctx,
+ struct r600_query *rquery,
+ bool wait,
+ union pipe_query_result *result)
{
struct r600_query_sw *query = (struct r600_query_sw *)rquery;
case PIPE_QUERY_TIMESTAMP_DISJOINT:
/* Convert from cycles per millisecond to cycles per second (Hz). */
result->timestamp_disjoint.frequency =
- (uint64_t)rctx->screen->info.r600_clock_crystal_freq * 1000;
- result->timestamp_disjoint.disjoint = FALSE;
- return TRUE;
+ (uint64_t)rctx->screen->info.clock_crystal_freq * 1000;
+ result->timestamp_disjoint.disjoint = false;
+ return true;
case PIPE_QUERY_GPU_FINISHED: {
struct pipe_screen *screen = rctx->b.screen;
- result->b = screen->fence_finish(screen, query->fence,
+ result->b = screen->fence_finish(screen, &rctx->b, query->fence,
wait ? PIPE_TIMEOUT_INFINITE : 0);
return result->b;
}
+
+ case R600_QUERY_GPIN_ASIC_ID:
+ result->u32 = 0;
+ return true;
+ case R600_QUERY_GPIN_NUM_SIMD:
+ result->u32 = rctx->screen->info.num_good_compute_units;
+ return true;
+ case R600_QUERY_GPIN_NUM_RB:
+ result->u32 = rctx->screen->info.num_render_backends;
+ return true;
+ case R600_QUERY_GPIN_NUM_SPI:
+ result->u32 = 1; /* all supported chips have one SPI per SE */
+ return true;
+ case R600_QUERY_GPIN_NUM_SE:
+ result->u32 = rctx->screen->info.max_se;
+ return true;
}
result->u64 = query->end_result - query->begin_result;
break;
}
- return TRUE;
+ return true;
}
+
static struct r600_query_ops sw_query_ops = {
.destroy = r600_query_sw_destroy,
.begin = r600_query_sw_begin,
.end = r600_query_sw_end,
- .get_result = r600_query_sw_get_result
+ .get_result = r600_query_sw_get_result,
+ .get_result_resource = NULL
};
static struct pipe_query *r600_query_sw_create(struct pipe_context *ctx,
struct r600_query_sw *query;
query = CALLOC_STRUCT(r600_query_sw);
- if (query == NULL)
+ if (!query)
return NULL;
query->b.type = query_type;
return (struct pipe_query *)query;
}
-static void r600_do_destroy_query(struct r600_common_context *, struct r600_query *);
-static boolean r600_do_begin_query(struct r600_common_context *, struct r600_query *);
-static void r600_do_end_query(struct r600_common_context *, struct r600_query *);
-static boolean r600_do_get_query_result(struct r600_common_context *,
- struct r600_query *, boolean wait,
- union pipe_query_result *result);
-
-static struct r600_query_ops legacy_query_ops = {
- .destroy = r600_do_destroy_query,
- .begin = r600_do_begin_query,
- .end = r600_do_end_query,
- .get_result = r600_do_get_query_result,
-};
-
-static bool r600_is_timer_query(unsigned type)
+void r600_query_hw_destroy(struct r600_common_context *rctx,
+ struct r600_query *rquery)
{
- return type == PIPE_QUERY_TIME_ELAPSED ||
- type == PIPE_QUERY_TIMESTAMP;
-}
+ struct r600_query_hw *query = (struct r600_query_hw *)rquery;
+ struct r600_query_buffer *prev = query->buffer.previous;
-static bool r600_query_needs_begin(unsigned type)
-{
- return type != PIPE_QUERY_TIMESTAMP;
+ /* Release all query buffers. */
+ while (prev) {
+ struct r600_query_buffer *qbuf = prev;
+ prev = prev->previous;
+ r600_resource_reference(&qbuf->buf, NULL);
+ FREE(qbuf);
+ }
+
+ r600_resource_reference(&query->buffer.buf, NULL);
+ FREE(rquery);
}
-static struct r600_resource *r600_new_query_buffer(struct r600_common_context *ctx, unsigned type)
+static struct r600_resource *r600_new_query_buffer(struct r600_common_context *ctx,
+ struct r600_query_hw *query)
{
- unsigned j, i, num_results, buf_size = 4096;
- uint32_t *results;
+ unsigned buf_size = MAX2(query->result_size,
+ ctx->screen->info.min_alloc_size);
/* Queries are normally read by the CPU after
 * being written by the gpu, hence staging is probably a good
 * usage pattern.
 */
struct r600_resource *buf = (struct r600_resource*)
pipe_buffer_create(ctx->b.screen, PIPE_BIND_CUSTOM,
PIPE_USAGE_STAGING, buf_size);
+ if (!buf)
+ return NULL;
- switch (type) {
- case PIPE_QUERY_OCCLUSION_COUNTER:
- case PIPE_QUERY_OCCLUSION_PREDICATE:
- results = r600_buffer_map_sync_with_rings(ctx, buf, PIPE_TRANSFER_WRITE);
- memset(results, 0, buf_size);
+ if (!query->ops->prepare_buffer(ctx, query, buf)) {
+ r600_resource_reference(&buf, NULL);
+ return NULL;
+ }
+
+ return buf;
+}
+
+static bool r600_query_hw_prepare_buffer(struct r600_common_context *ctx,
+ struct r600_query_hw *query,
+ struct r600_resource *buffer)
+{
+ /* Callers ensure that the buffer is currently unused by the GPU. */
+ uint32_t *results = ctx->ws->buffer_map(buffer->buf, NULL,
+ PIPE_TRANSFER_WRITE |
+ PIPE_TRANSFER_UNSYNCHRONIZED);
+ if (!results)
+ return false;
+
+ memset(results, 0, buffer->b.b.width0);
+
+ if (query->b.type == PIPE_QUERY_OCCLUSION_COUNTER ||
+ query->b.type == PIPE_QUERY_OCCLUSION_PREDICATE) {
+ unsigned num_results;
+ unsigned i, j;
/* Set top bits for unused backends. */
- num_results = buf_size / (16 * ctx->max_db);
+ num_results = buffer->b.b.width0 / query->result_size;
for (j = 0; j < num_results; j++) {
for (i = 0; i < ctx->max_db; i++) {
				if (!(ctx->backend_mask & (1<<i))) {
					results[(i * 4)+1] = 0x80000000;
					results[(i * 4)+3] = 0x80000000;
				}
results += 4 * ctx->max_db;
}
+ }
+
+ return true;
+}
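+
+/* The 0x80000000 writes above preset the status bit (bit 63) of both the
+ * begin and end samples of DBs missing from backend_mask, so that
+ * r600_query_read_result() with test_status_bit set treats those slots as
+ * valid zero results rather than as samples that were never written. */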
+
+static void r600_query_hw_get_result_resource(struct r600_common_context *rctx,
+ struct r600_query *rquery,
+ bool wait,
+ enum pipe_query_value_type result_type,
+ int index,
+ struct pipe_resource *resource,
+ unsigned offset);
+
+static struct r600_query_ops query_hw_ops = {
+ .destroy = r600_query_hw_destroy,
+ .begin = r600_query_hw_begin,
+ .end = r600_query_hw_end,
+ .get_result = r600_query_hw_get_result,
+ .get_result_resource = r600_query_hw_get_result_resource,
+};
+
+static void r600_query_hw_do_emit_start(struct r600_common_context *ctx,
+ struct r600_query_hw *query,
+ struct r600_resource *buffer,
+ uint64_t va);
+static void r600_query_hw_do_emit_stop(struct r600_common_context *ctx,
+ struct r600_query_hw *query,
+ struct r600_resource *buffer,
+ uint64_t va);
+static void r600_query_hw_add_result(struct r600_common_context *ctx,
+ struct r600_query_hw *, void *buffer,
+ union pipe_query_result *result);
+static void r600_query_hw_clear_result(struct r600_query_hw *,
+ union pipe_query_result *);
+
+static struct r600_query_hw_ops query_hw_default_hw_ops = {
+ .prepare_buffer = r600_query_hw_prepare_buffer,
+ .emit_start = r600_query_hw_do_emit_start,
+ .emit_stop = r600_query_hw_do_emit_stop,
+ .clear_result = r600_query_hw_clear_result,
+ .add_result = r600_query_hw_add_result,
+};
+
+bool r600_query_hw_init(struct r600_common_context *rctx,
+ struct r600_query_hw *query)
+{
+ query->buffer.buf = r600_new_query_buffer(rctx, query);
+ if (!query->buffer.buf)
+ return false;
+
+ return true;
+}
+
+static struct pipe_query *r600_query_hw_create(struct r600_common_context *rctx,
+ unsigned query_type,
+ unsigned index)
+{
+ struct r600_query_hw *query = CALLOC_STRUCT(r600_query_hw);
+ if (!query)
+ return NULL;
+
+ query->b.type = query_type;
+ query->b.ops = &query_hw_ops;
+ query->ops = &query_hw_default_hw_ops;
+
+ switch (query_type) {
+ case PIPE_QUERY_OCCLUSION_COUNTER:
+ case PIPE_QUERY_OCCLUSION_PREDICATE:
+ query->result_size = 16 * rctx->max_db;
+ query->result_size += 16; /* for the fence + alignment */
+ query->num_cs_dw_begin = 6;
+ query->num_cs_dw_end = 6 + r600_gfx_write_fence_dwords(rctx->screen);
break;
case PIPE_QUERY_TIME_ELAPSED:
+ query->result_size = 24;
+ query->num_cs_dw_begin = 8;
+ query->num_cs_dw_end = 8 + r600_gfx_write_fence_dwords(rctx->screen);
+ break;
case PIPE_QUERY_TIMESTAMP:
+ query->result_size = 16;
+ query->num_cs_dw_end = 8 + r600_gfx_write_fence_dwords(rctx->screen);
+ query->flags = R600_QUERY_HW_FLAG_NO_START;
break;
case PIPE_QUERY_PRIMITIVES_EMITTED:
case PIPE_QUERY_PRIMITIVES_GENERATED:
case PIPE_QUERY_SO_STATISTICS:
case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
+ /* NumPrimitivesWritten, PrimitiveStorageNeeded. */
+ query->result_size = 32;
+ query->num_cs_dw_begin = 6;
+ query->num_cs_dw_end = 6;
+ query->stream = index;
+ break;
case PIPE_QUERY_PIPELINE_STATISTICS:
- results = r600_buffer_map_sync_with_rings(ctx, buf, PIPE_TRANSFER_WRITE);
- memset(results, 0, buf_size);
+ /* 11 values on EG, 8 on R600. */
+ query->result_size = (rctx->chip_class >= EVERGREEN ? 11 : 8) * 16;
+ query->result_size += 8; /* for the fence + alignment */
+ query->num_cs_dw_begin = 6;
+ query->num_cs_dw_end = 6 + r600_gfx_write_fence_dwords(rctx->screen);
break;
default:
assert(0);
+ FREE(query);
+ return NULL;
}
- return buf;
+
+ if (!r600_query_hw_init(rctx, query)) {
+ FREE(query);
+ return NULL;
+ }
+
+ return (struct pipe_query *)query;
}
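+
+/* For example, the 24-byte PIPE_QUERY_TIME_ELAPSED slot set up above holds
+ * the begin timestamp at offset 0, the end timestamp at offset 8 and the
+ * 0x80000000 readiness fence at offset 16, matching the offsets returned
+ * by r600_get_hw_query_params(). */
+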
static void r600_update_occlusion_query_state(struct r600_common_context *rctx,
if (type == PIPE_QUERY_OCCLUSION_COUNTER ||
type == PIPE_QUERY_OCCLUSION_PREDICATE) {
bool old_enable = rctx->num_occlusion_queries != 0;
- bool enable;
+ bool old_perfect_enable =
+ rctx->num_perfect_occlusion_queries != 0;
+ bool enable, perfect_enable;
rctx->num_occlusion_queries += diff;
assert(rctx->num_occlusion_queries >= 0);
+ if (type == PIPE_QUERY_OCCLUSION_COUNTER) {
+ rctx->num_perfect_occlusion_queries += diff;
+ assert(rctx->num_perfect_occlusion_queries >= 0);
+ }
+
enable = rctx->num_occlusion_queries != 0;
+ perfect_enable = rctx->num_perfect_occlusion_queries != 0;
- if (enable != old_enable) {
+ if (enable != old_enable || perfect_enable != old_perfect_enable) {
rctx->set_occlusion_query_state(&rctx->b, enable);
}
}
}
-static unsigned event_type_for_stream(struct r600_query *query)
+static unsigned event_type_for_stream(struct r600_query_hw *query)
{
switch (query->stream) {
default:
}
}
-static void r600_emit_query_begin(struct r600_common_context *ctx, struct r600_query *query)
+static void r600_query_hw_do_emit_start(struct r600_common_context *ctx,
+ struct r600_query_hw *query,
+ struct r600_resource *buffer,
+ uint64_t va)
{
struct radeon_winsys_cs *cs = ctx->gfx.cs;
- uint64_t va;
-
- r600_update_occlusion_query_state(ctx, query->type, 1);
- r600_update_prims_generated_query_state(ctx, query->type, 1);
- ctx->need_gfx_cs_space(&ctx->b, query->num_cs_dw * 2, TRUE);
-
- /* Get a new query buffer if needed. */
- if (query->buffer.results_end + query->result_size > query->buffer.buf->b.b.width0) {
- struct r600_query_buffer *qbuf = MALLOC_STRUCT(r600_query_buffer);
- *qbuf = query->buffer;
- query->buffer.buf = r600_new_query_buffer(ctx, query->type);
- query->buffer.results_end = 0;
- query->buffer.previous = qbuf;
- }
-
- /* emit begin query */
- va = query->buffer.buf->gpu_address + query->buffer.results_end;
- switch (query->type) {
+ switch (query->b.type) {
case PIPE_QUERY_OCCLUSION_COUNTER:
case PIPE_QUERY_OCCLUSION_PREDICATE:
radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
break;
case PIPE_QUERY_TIME_ELAPSED:
radeon_emit(cs, PKT3(PKT3_EVENT_WRITE_EOP, 4, 0));
- radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_CACHE_FLUSH_AND_INV_TS_EVENT) | EVENT_INDEX(5));
+ radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_BOTTOM_OF_PIPE_TS) | EVENT_INDEX(5));
radeon_emit(cs, va);
radeon_emit(cs, (3 << 29) | ((va >> 32) & 0xFFFF));
radeon_emit(cs, 0);
}
r600_emit_reloc(ctx, &ctx->gfx, query->buffer.buf, RADEON_USAGE_WRITE,
RADEON_PRIO_QUERY);
-
- if (r600_is_timer_query(query->type))
- ctx->num_cs_dw_timer_queries_suspend += query->num_cs_dw;
- else
- ctx->num_cs_dw_nontimer_queries_suspend += query->num_cs_dw;
}
-static void r600_emit_query_end(struct r600_common_context *ctx, struct r600_query *query)
+static void r600_query_hw_emit_start(struct r600_common_context *ctx,
+ struct r600_query_hw *query)
{
- struct radeon_winsys_cs *cs = ctx->gfx.cs;
uint64_t va;
- /* The queries which need begin already called this in begin_query. */
- if (!r600_query_needs_begin(query->type)) {
- ctx->need_gfx_cs_space(&ctx->b, query->num_cs_dw, FALSE);
+ if (!query->buffer.buf)
+ return; // previous buffer allocation failure
+
+ r600_update_occlusion_query_state(ctx, query->b.type, 1);
+ r600_update_prims_generated_query_state(ctx, query->b.type, 1);
+
+ ctx->need_gfx_cs_space(&ctx->b, query->num_cs_dw_begin + query->num_cs_dw_end,
+ true);
+
+ /* Get a new query buffer if needed. */
+ if (query->buffer.results_end + query->result_size > query->buffer.buf->b.b.width0) {
+ struct r600_query_buffer *qbuf = MALLOC_STRUCT(r600_query_buffer);
+ *qbuf = query->buffer;
+ query->buffer.results_end = 0;
+ query->buffer.previous = qbuf;
+ query->buffer.buf = r600_new_query_buffer(ctx, query);
+ if (!query->buffer.buf)
+ return;
}
- va = query->buffer.buf->gpu_address;
+ /* emit begin query */
+ va = query->buffer.buf->gpu_address + query->buffer.results_end;
+
+ query->ops->emit_start(ctx, query, query->buffer.buf, va);
- /* emit end query */
- switch (query->type) {
+ ctx->num_cs_dw_queries_suspend += query->num_cs_dw_end;
+}
+
+static void r600_query_hw_do_emit_stop(struct r600_common_context *ctx,
+ struct r600_query_hw *query,
+ struct r600_resource *buffer,
+ uint64_t va)
+{
+ struct radeon_winsys_cs *cs = ctx->gfx.cs;
+
+ switch (query->b.type) {
case PIPE_QUERY_OCCLUSION_COUNTER:
case PIPE_QUERY_OCCLUSION_PREDICATE:
- va += query->buffer.results_end + 8;
+ va += 8;
radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1));
radeon_emit(cs, va);
radeon_emit(cs, (va >> 32) & 0xFFFF);
+
+ va += ctx->max_db * 16 - 8;
+ r600_gfx_write_fence(ctx, va, 0, 0x80000000);
break;
case PIPE_QUERY_PRIMITIVES_EMITTED:
case PIPE_QUERY_PRIMITIVES_GENERATED:
case PIPE_QUERY_SO_STATISTICS:
case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
- va += query->buffer.results_end + query->result_size/2;
+ va += query->result_size/2;
radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
radeon_emit(cs, EVENT_TYPE(event_type_for_stream(query)) | EVENT_INDEX(3));
radeon_emit(cs, va);
radeon_emit(cs, (va >> 32) & 0xFFFF);
break;
case PIPE_QUERY_TIME_ELAPSED:
- va += query->buffer.results_end + query->result_size/2;
+ va += 8;
/* fall through */
case PIPE_QUERY_TIMESTAMP:
radeon_emit(cs, PKT3(PKT3_EVENT_WRITE_EOP, 4, 0));
- radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_CACHE_FLUSH_AND_INV_TS_EVENT) | EVENT_INDEX(5));
+ radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_BOTTOM_OF_PIPE_TS) | EVENT_INDEX(5));
radeon_emit(cs, va);
radeon_emit(cs, (3 << 29) | ((va >> 32) & 0xFFFF));
radeon_emit(cs, 0);
radeon_emit(cs, 0);
+
+ va += 8;
+ r600_gfx_write_fence(ctx, va, 0, 0x80000000);
break;
- case PIPE_QUERY_PIPELINE_STATISTICS:
- va += query->buffer.results_end + query->result_size/2;
+ case PIPE_QUERY_PIPELINE_STATISTICS: {
+ unsigned sample_size = (query->result_size - 8) / 2;
+
+ va += sample_size;
radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_SAMPLE_PIPELINESTAT) | EVENT_INDEX(2));
radeon_emit(cs, va);
radeon_emit(cs, (va >> 32) & 0xFFFF);
+
+ va += sample_size;
+ r600_gfx_write_fence(ctx, va, 0, 0x80000000);
break;
+ }
default:
assert(0);
}
r600_emit_reloc(ctx, &ctx->gfx, query->buffer.buf, RADEON_USAGE_WRITE,
RADEON_PRIO_QUERY);
+}
- query->buffer.results_end += query->result_size;
+static void r600_query_hw_emit_stop(struct r600_common_context *ctx,
+ struct r600_query_hw *query)
+{
+ uint64_t va;
- if (r600_query_needs_begin(query->type)) {
- if (r600_is_timer_query(query->type))
- ctx->num_cs_dw_timer_queries_suspend -= query->num_cs_dw;
- else
- ctx->num_cs_dw_nontimer_queries_suspend -= query->num_cs_dw;
+ if (!query->buffer.buf)
+ return; // previous buffer allocation failure
+
+ /* The queries which need begin already called this in begin_query. */
+ if (query->flags & R600_QUERY_HW_FLAG_NO_START) {
+ ctx->need_gfx_cs_space(&ctx->b, query->num_cs_dw_end, false);
}
- r600_update_occlusion_query_state(ctx, query->type, -1);
- r600_update_prims_generated_query_state(ctx, query->type, -1);
+ /* emit end query */
+ va = query->buffer.buf->gpu_address + query->buffer.results_end;
+
+ query->ops->emit_stop(ctx, query, query->buffer.buf, va);
+
+ query->buffer.results_end += query->result_size;
+
+ if (!(query->flags & R600_QUERY_HW_FLAG_NO_START))
+ ctx->num_cs_dw_queries_suspend -= query->num_cs_dw_end;
+
+ r600_update_occlusion_query_state(ctx, query->b.type, -1);
+ r600_update_prims_generated_query_state(ctx, query->b.type, -1);
}
static void r600_emit_query_predication(struct r600_common_context *ctx,
struct r600_atom *atom)
{
struct radeon_winsys_cs *cs = ctx->gfx.cs;
- struct r600_query *query = (struct r600_query*)ctx->render_cond;
+ struct r600_query_hw *query = (struct r600_query_hw *)ctx->render_cond;
struct r600_query_buffer *qbuf;
uint32_t op;
bool flag_wait;
flag_wait = ctx->render_cond_mode == PIPE_RENDER_COND_WAIT ||
ctx->render_cond_mode == PIPE_RENDER_COND_BY_REGION_WAIT;
- switch (query->type) {
+ switch (query->b.type) {
case PIPE_QUERY_OCCLUSION_COUNTER:
case PIPE_QUERY_OCCLUSION_PREDICATE:
op = PRED_OP(PREDICATION_OP_ZPASS);
static struct pipe_query *r600_create_query(struct pipe_context *ctx, unsigned query_type, unsigned index)
{
struct r600_common_context *rctx = (struct r600_common_context *)ctx;
- struct r600_query *query;
if (query_type == PIPE_QUERY_TIMESTAMP_DISJOINT ||
query_type == PIPE_QUERY_GPU_FINISHED ||
query_type >= PIPE_QUERY_DRIVER_SPECIFIC)
return r600_query_sw_create(ctx, query_type);
- query = CALLOC_STRUCT(r600_query);
- if (query == NULL)
- return NULL;
-
- query->type = query_type;
- query->ops = &legacy_query_ops;
-
- switch (query_type) {
- case PIPE_QUERY_OCCLUSION_COUNTER:
- case PIPE_QUERY_OCCLUSION_PREDICATE:
- query->result_size = 16 * rctx->max_db;
- query->num_cs_dw = 6;
- break;
- case PIPE_QUERY_TIME_ELAPSED:
- query->result_size = 16;
- query->num_cs_dw = 8;
- break;
- case PIPE_QUERY_TIMESTAMP:
- query->result_size = 8;
- query->num_cs_dw = 8;
- break;
- case PIPE_QUERY_PRIMITIVES_EMITTED:
- case PIPE_QUERY_PRIMITIVES_GENERATED:
- case PIPE_QUERY_SO_STATISTICS:
- case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
- /* NumPrimitivesWritten, PrimitiveStorageNeeded. */
- query->result_size = 32;
- query->num_cs_dw = 6;
- query->stream = index;
- break;
- case PIPE_QUERY_PIPELINE_STATISTICS:
- /* 11 values on EG, 8 on R600. */
- query->result_size = (rctx->chip_class >= EVERGREEN ? 11 : 8) * 16;
- query->num_cs_dw = 6;
- break;
- default:
- assert(0);
- FREE(query);
- return NULL;
- }
-
- query->buffer.buf = r600_new_query_buffer(rctx, query_type);
- if (!query->buffer.buf) {
- FREE(query);
- return NULL;
- }
-
- return (struct pipe_query*)query;
+ return r600_query_hw_create(rctx, query_type, index);
}
static void r600_destroy_query(struct pipe_context *ctx, struct pipe_query *query)
rquery->ops->destroy(rctx, rquery);
}
-static void r600_do_destroy_query(struct r600_common_context *rctx,
- struct r600_query *rquery)
-{
- struct r600_query_buffer *prev = rquery->buffer.previous;
-
- /* Release all query buffers. */
- while (prev) {
- struct r600_query_buffer *qbuf = prev;
- prev = prev->previous;
- pipe_resource_reference((struct pipe_resource**)&qbuf->buf, NULL);
- FREE(qbuf);
- }
-
- pipe_resource_reference((struct pipe_resource**)&rquery->buffer.buf, NULL);
- FREE(rquery);
-}
-
static boolean r600_begin_query(struct pipe_context *ctx,
struct pipe_query *query)
{
return rquery->ops->begin(rctx, rquery);
}
-static boolean r600_do_begin_query(struct r600_common_context *rctx,
- struct r600_query *rquery)
+void r600_query_hw_reset_buffers(struct r600_common_context *rctx,
+ struct r600_query_hw *query)
{
- struct r600_query_buffer *prev = rquery->buffer.previous;
-
- if (!r600_query_needs_begin(rquery->type)) {
- assert(0);
- return false;
- }
+ struct r600_query_buffer *prev = query->buffer.previous;
/* Discard the old query buffers. */
while (prev) {
struct r600_query_buffer *qbuf = prev;
prev = prev->previous;
- pipe_resource_reference((struct pipe_resource**)&qbuf->buf, NULL);
+ r600_resource_reference(&qbuf->buf, NULL);
FREE(qbuf);
}
+ query->buffer.results_end = 0;
+ query->buffer.previous = NULL;
+
/* Obtain a new buffer if the current one can't be mapped without a stall. */
- if (r600_rings_is_buffer_referenced(rctx, rquery->buffer.buf->cs_buf, RADEON_USAGE_READWRITE) ||
- !rctx->ws->buffer_wait(rquery->buffer.buf->buf, 0, RADEON_USAGE_READWRITE)) {
- pipe_resource_reference((struct pipe_resource**)&rquery->buffer.buf, NULL);
- rquery->buffer.buf = r600_new_query_buffer(rctx, rquery->type);
+ if (r600_rings_is_buffer_referenced(rctx, query->buffer.buf->buf, RADEON_USAGE_READWRITE) ||
+ !rctx->ws->buffer_wait(query->buffer.buf->buf, 0, RADEON_USAGE_READWRITE)) {
+ r600_resource_reference(&query->buffer.buf, NULL);
+ query->buffer.buf = r600_new_query_buffer(rctx, query);
+ } else {
+ if (!query->ops->prepare_buffer(rctx, query, query->buffer.buf))
+ r600_resource_reference(&query->buffer.buf, NULL);
}
+}
- rquery->buffer.results_end = 0;
- rquery->buffer.previous = NULL;
+bool r600_query_hw_begin(struct r600_common_context *rctx,
+ struct r600_query *rquery)
+{
+ struct r600_query_hw *query = (struct r600_query_hw *)rquery;
- r600_emit_query_begin(rctx, rquery);
+ if (query->flags & R600_QUERY_HW_FLAG_NO_START) {
+ assert(0);
+ return false;
+ }
- if (r600_is_timer_query(rquery->type))
- LIST_ADDTAIL(&rquery->list, &rctx->active_timer_queries);
- else
- LIST_ADDTAIL(&rquery->list, &rctx->active_nontimer_queries);
- return true;
+ if (!(query->flags & R600_QUERY_HW_FLAG_BEGIN_RESUMES))
+ r600_query_hw_reset_buffers(rctx, query);
+
+ r600_query_hw_emit_start(rctx, query);
+ if (!query->buffer.buf)
+ return false;
+
+ LIST_ADDTAIL(&query->list, &rctx->active_queries);
+ return true;
}
-static void r600_end_query(struct pipe_context *ctx, struct pipe_query *query)
+static bool r600_end_query(struct pipe_context *ctx, struct pipe_query *query)
{
struct r600_common_context *rctx = (struct r600_common_context *)ctx;
struct r600_query *rquery = (struct r600_query *)query;
- rquery->ops->end(rctx, rquery);
+ return rquery->ops->end(rctx, rquery);
}
-static void r600_do_end_query(struct r600_common_context *rctx,
- struct r600_query *rquery)
+bool r600_query_hw_end(struct r600_common_context *rctx,
+ struct r600_query *rquery)
+{
+ struct r600_query_hw *query = (struct r600_query_hw *)rquery;
+
+ if (query->flags & R600_QUERY_HW_FLAG_NO_START)
+ r600_query_hw_reset_buffers(rctx, query);
+
+ r600_query_hw_emit_stop(rctx, query);
+
+ if (!(query->flags & R600_QUERY_HW_FLAG_NO_START))
+ LIST_DELINIT(&query->list);
+
+ if (!query->buffer.buf)
+ return false;
+
+ return true;
+}
+
+static void r600_get_hw_query_params(struct r600_common_context *rctx,
+ struct r600_query_hw *rquery, int index,
+ struct r600_hw_query_params *params)
{
- r600_emit_query_end(rctx, rquery);
+ params->pair_stride = 0;
+ params->pair_count = 1;
- if (r600_query_needs_begin(rquery->type))
- LIST_DELINIT(&rquery->list);
+ switch (rquery->b.type) {
+ case PIPE_QUERY_OCCLUSION_COUNTER:
+ case PIPE_QUERY_OCCLUSION_PREDICATE:
+ params->start_offset = 0;
+ params->end_offset = 8;
+ params->fence_offset = rctx->max_db * 16;
+ params->pair_stride = 16;
+ params->pair_count = rctx->max_db;
+ break;
+ case PIPE_QUERY_TIME_ELAPSED:
+ params->start_offset = 0;
+ params->end_offset = 8;
+ params->fence_offset = 16;
+ break;
+ case PIPE_QUERY_TIMESTAMP:
+ params->start_offset = 0;
+ params->end_offset = 0;
+ params->fence_offset = 8;
+ break;
+ case PIPE_QUERY_PRIMITIVES_EMITTED:
+ params->start_offset = 8;
+ params->end_offset = 24;
+ params->fence_offset = params->end_offset + 4;
+ break;
+ case PIPE_QUERY_PRIMITIVES_GENERATED:
+ params->start_offset = 0;
+ params->end_offset = 16;
+ params->fence_offset = params->end_offset + 4;
+ break;
+ case PIPE_QUERY_SO_STATISTICS:
+ params->start_offset = 8 - index * 8;
+ params->end_offset = 24 - index * 8;
+ params->fence_offset = params->end_offset + 4;
+ break;
+ case PIPE_QUERY_PIPELINE_STATISTICS:
+ {
+ /* Offsets apply to EG+ */
+ static const unsigned offsets[] = {56, 48, 24, 32, 40, 16, 8, 0, 64, 72, 80};
+ params->start_offset = offsets[index];
+ params->end_offset = 88 + offsets[index];
+ params->fence_offset = 2 * 88;
+ break;
+ }
+ default:
+ unreachable("r600_get_hw_query_params unsupported");
+ }
}
-static unsigned r600_query_read_result(char *map, unsigned start_index, unsigned end_index,
+static unsigned r600_query_read_result(void *map, unsigned start_index, unsigned end_index,
bool test_status_bit)
{
uint32_t *current_result = (uint32_t*)map;
return 0;
}
-static boolean r600_get_query_buffer_result(struct r600_common_context *ctx,
- struct r600_query *query,
- struct r600_query_buffer *qbuf,
- boolean wait,
- union pipe_query_result *result)
+static void r600_query_hw_add_result(struct r600_common_context *ctx,
+ struct r600_query_hw *query,
+ void *buffer,
+ union pipe_query_result *result)
{
- unsigned results_base = 0;
- char *map;
-
- map = r600_buffer_map_sync_with_rings(ctx, qbuf->buf,
- PIPE_TRANSFER_READ |
- (wait ? 0 : PIPE_TRANSFER_DONTBLOCK));
- if (!map)
- return FALSE;
-
- /* count all results across all data blocks */
- switch (query->type) {
- case PIPE_QUERY_OCCLUSION_COUNTER:
- while (results_base != qbuf->results_end) {
+ switch (query->b.type) {
+ case PIPE_QUERY_OCCLUSION_COUNTER: {
+ for (unsigned i = 0; i < ctx->max_db; ++i) {
+ unsigned results_base = i * 16;
result->u64 +=
- r600_query_read_result(map + results_base, 0, 2, true);
- results_base += 16;
+ r600_query_read_result(buffer + results_base, 0, 2, true);
}
break;
- case PIPE_QUERY_OCCLUSION_PREDICATE:
- while (results_base != qbuf->results_end) {
+ }
+ case PIPE_QUERY_OCCLUSION_PREDICATE: {
+ for (unsigned i = 0; i < ctx->max_db; ++i) {
+ unsigned results_base = i * 16;
result->b = result->b ||
- r600_query_read_result(map + results_base, 0, 2, true) != 0;
- results_base += 16;
+ r600_query_read_result(buffer + results_base, 0, 2, true) != 0;
}
break;
+ }
case PIPE_QUERY_TIME_ELAPSED:
- while (results_base != qbuf->results_end) {
- result->u64 +=
- r600_query_read_result(map + results_base, 0, 2, false);
- results_base += query->result_size;
- }
+ result->u64 += r600_query_read_result(buffer, 0, 2, false);
break;
case PIPE_QUERY_TIMESTAMP:
- {
- uint32_t *current_result = (uint32_t*)map;
- result->u64 = (uint64_t)current_result[0] |
- (uint64_t)current_result[1] << 32;
+ result->u64 = *(uint64_t*)buffer;
break;
- }
case PIPE_QUERY_PRIMITIVES_EMITTED:
/* SAMPLE_STREAMOUTSTATS stores this structure:
* {
 * u64 NumPrimitivesWritten;
 * u64 PrimitiveStorageNeeded;
* }
* We only need NumPrimitivesWritten here. */
- while (results_base != qbuf->results_end) {
- result->u64 +=
- r600_query_read_result(map + results_base, 2, 6, true);
- results_base += query->result_size;
- }
+ result->u64 += r600_query_read_result(buffer, 2, 6, true);
break;
case PIPE_QUERY_PRIMITIVES_GENERATED:
/* Here we read PrimitiveStorageNeeded. */
- while (results_base != qbuf->results_end) {
- result->u64 +=
- r600_query_read_result(map + results_base, 0, 4, true);
- results_base += query->result_size;
- }
+ result->u64 += r600_query_read_result(buffer, 0, 4, true);
break;
case PIPE_QUERY_SO_STATISTICS:
- while (results_base != qbuf->results_end) {
- result->so_statistics.num_primitives_written +=
- r600_query_read_result(map + results_base, 2, 6, true);
- result->so_statistics.primitives_storage_needed +=
- r600_query_read_result(map + results_base, 0, 4, true);
- results_base += query->result_size;
- }
+ result->so_statistics.num_primitives_written +=
+ r600_query_read_result(buffer, 2, 6, true);
+ result->so_statistics.primitives_storage_needed +=
+ r600_query_read_result(buffer, 0, 4, true);
break;
case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
- while (results_base != qbuf->results_end) {
- result->b = result->b ||
- r600_query_read_result(map + results_base, 2, 6, true) !=
- r600_query_read_result(map + results_base, 0, 4, true);
- results_base += query->result_size;
- }
+ result->b = result->b ||
+ r600_query_read_result(buffer, 2, 6, true) !=
+ r600_query_read_result(buffer, 0, 4, true);
break;
case PIPE_QUERY_PIPELINE_STATISTICS:
if (ctx->chip_class >= EVERGREEN) {
- while (results_base != qbuf->results_end) {
- result->pipeline_statistics.ps_invocations +=
- r600_query_read_result(map + results_base, 0, 22, false);
- result->pipeline_statistics.c_primitives +=
- r600_query_read_result(map + results_base, 2, 24, false);
- result->pipeline_statistics.c_invocations +=
- r600_query_read_result(map + results_base, 4, 26, false);
- result->pipeline_statistics.vs_invocations +=
- r600_query_read_result(map + results_base, 6, 28, false);
- result->pipeline_statistics.gs_invocations +=
- r600_query_read_result(map + results_base, 8, 30, false);
- result->pipeline_statistics.gs_primitives +=
- r600_query_read_result(map + results_base, 10, 32, false);
- result->pipeline_statistics.ia_primitives +=
- r600_query_read_result(map + results_base, 12, 34, false);
- result->pipeline_statistics.ia_vertices +=
- r600_query_read_result(map + results_base, 14, 36, false);
- result->pipeline_statistics.hs_invocations +=
- r600_query_read_result(map + results_base, 16, 38, false);
- result->pipeline_statistics.ds_invocations +=
- r600_query_read_result(map + results_base, 18, 40, false);
- result->pipeline_statistics.cs_invocations +=
- r600_query_read_result(map + results_base, 20, 42, false);
- results_base += query->result_size;
- }
+ result->pipeline_statistics.ps_invocations +=
+ r600_query_read_result(buffer, 0, 22, false);
+ result->pipeline_statistics.c_primitives +=
+ r600_query_read_result(buffer, 2, 24, false);
+ result->pipeline_statistics.c_invocations +=
+ r600_query_read_result(buffer, 4, 26, false);
+ result->pipeline_statistics.vs_invocations +=
+ r600_query_read_result(buffer, 6, 28, false);
+ result->pipeline_statistics.gs_invocations +=
+ r600_query_read_result(buffer, 8, 30, false);
+ result->pipeline_statistics.gs_primitives +=
+ r600_query_read_result(buffer, 10, 32, false);
+ result->pipeline_statistics.ia_primitives +=
+ r600_query_read_result(buffer, 12, 34, false);
+ result->pipeline_statistics.ia_vertices +=
+ r600_query_read_result(buffer, 14, 36, false);
+ result->pipeline_statistics.hs_invocations +=
+ r600_query_read_result(buffer, 16, 38, false);
+ result->pipeline_statistics.ds_invocations +=
+ r600_query_read_result(buffer, 18, 40, false);
+ result->pipeline_statistics.cs_invocations +=
+ r600_query_read_result(buffer, 20, 42, false);
} else {
- while (results_base != qbuf->results_end) {
- result->pipeline_statistics.ps_invocations +=
- r600_query_read_result(map + results_base, 0, 16, false);
- result->pipeline_statistics.c_primitives +=
- r600_query_read_result(map + results_base, 2, 18, false);
- result->pipeline_statistics.c_invocations +=
- r600_query_read_result(map + results_base, 4, 20, false);
- result->pipeline_statistics.vs_invocations +=
- r600_query_read_result(map + results_base, 6, 22, false);
- result->pipeline_statistics.gs_invocations +=
- r600_query_read_result(map + results_base, 8, 24, false);
- result->pipeline_statistics.gs_primitives +=
- r600_query_read_result(map + results_base, 10, 26, false);
- result->pipeline_statistics.ia_primitives +=
- r600_query_read_result(map + results_base, 12, 28, false);
- result->pipeline_statistics.ia_vertices +=
- r600_query_read_result(map + results_base, 14, 30, false);
- results_base += query->result_size;
- }
+ result->pipeline_statistics.ps_invocations +=
+ r600_query_read_result(buffer, 0, 16, false);
+ result->pipeline_statistics.c_primitives +=
+ r600_query_read_result(buffer, 2, 18, false);
+ result->pipeline_statistics.c_invocations +=
+ r600_query_read_result(buffer, 4, 20, false);
+ result->pipeline_statistics.vs_invocations +=
+ r600_query_read_result(buffer, 6, 22, false);
+ result->pipeline_statistics.gs_invocations +=
+ r600_query_read_result(buffer, 8, 24, false);
+ result->pipeline_statistics.gs_primitives +=
+ r600_query_read_result(buffer, 10, 26, false);
+ result->pipeline_statistics.ia_primitives +=
+ r600_query_read_result(buffer, 12, 28, false);
+ result->pipeline_statistics.ia_vertices +=
+ r600_query_read_result(buffer, 14, 30, false);
}
#if 0 /* for testing */
printf("Pipeline stats: IA verts=%llu, IA prims=%llu, VS=%llu, HS=%llu, "
default:
assert(0);
}
-
- return TRUE;
}
static boolean r600_get_query_result(struct pipe_context *ctx,
return rquery->ops->get_result(rctx, rquery, wait, result);
}
-static boolean r600_do_get_query_result(struct r600_common_context *rctx,
- struct r600_query *rquery,
- boolean wait, union pipe_query_result *result)
+static void r600_get_query_result_resource(struct pipe_context *ctx,
+ struct pipe_query *query,
+ boolean wait,
+ enum pipe_query_value_type result_type,
+ int index,
+ struct pipe_resource *resource,
+ unsigned offset)
+{
+ struct r600_common_context *rctx = (struct r600_common_context *)ctx;
+ struct r600_query *rquery = (struct r600_query *)query;
+
+ rquery->ops->get_result_resource(rctx, rquery, wait, result_type, index,
+ resource, offset);
+}
+
+static void r600_query_hw_clear_result(struct r600_query_hw *query,
+ union pipe_query_result *result)
{
+ util_query_clear_result(result, query->b.type);
+}
+
+bool r600_query_hw_get_result(struct r600_common_context *rctx,
+ struct r600_query *rquery,
+ bool wait, union pipe_query_result *result)
+{
+ struct r600_query_hw *query = (struct r600_query_hw *)rquery;
struct r600_query_buffer *qbuf;
- util_query_clear_result(result, rquery->type);
+ query->ops->clear_result(query, result);
+
+ for (qbuf = &query->buffer; qbuf; qbuf = qbuf->previous) {
+ unsigned results_base = 0;
+ void *map;
+
+ map = r600_buffer_map_sync_with_rings(rctx, qbuf->buf,
+ PIPE_TRANSFER_READ |
+ (wait ? 0 : PIPE_TRANSFER_DONTBLOCK));
+ if (!map)
+ return false;
- for (qbuf = &rquery->buffer; qbuf; qbuf = qbuf->previous) {
- if (!r600_get_query_buffer_result(rctx, rquery, qbuf, wait, result)) {
- return FALSE;
+ while (results_base != qbuf->results_end) {
+ query->ops->add_result(rctx, query, map + results_base,
+ result);
+ results_base += query->result_size;
}
}
/* Convert the time to expected units. */
if (rquery->type == PIPE_QUERY_TIME_ELAPSED ||
rquery->type == PIPE_QUERY_TIMESTAMP) {
- result->u64 = (1000000 * result->u64) / rctx->screen->info.r600_clock_crystal_freq;
+ result->u64 = (1000000 * result->u64) / rctx->screen->info.clock_crystal_freq;
+ }
+ return true;
+}
+
+/* Create the compute shader that is used to collect the results.
+ *
+ * One compute grid with a single thread is launched for every query result
+ * buffer. The thread (optionally) reads a previous summary buffer, then
+ * accumulates data from the query result buffer, and writes the result either
+ * to a summary buffer to be consumed by the next grid invocation or to the
+ * user-supplied buffer.
+ *
+ * Data layout:
+ *
+ * CONST
+ * 0.x = end_offset
+ * 0.y = result_stride
+ * 0.z = result_count
+ * 0.w = bit field:
+ * 1: read previously accumulated values
+ * 2: write accumulated values for chaining
+ * 4: write result available
+ * 8: convert result to boolean (0/1)
+ * 16: only read one dword and use that as result
+ * 32: apply timestamp conversion
+ * 64: store full 64 bits result
+ * 128: store signed 32 bits result
+ * 1.x = fence_offset
+ * 1.y = pair_stride
+ * 1.z = pair_count
+ *
+ * BUFFER[0] = query result buffer
+ * BUFFER[1] = previous summary buffer
+ * BUFFER[2] = next summary buffer or user-supplied buffer
+ */
+static void r600_create_query_result_shader(struct r600_common_context *rctx)
+{
+ /* TEMP[0].xy = accumulated result so far
+ * TEMP[0].z = result not available
+ *
+ * TEMP[1].x = current result index
+ * TEMP[1].y = current pair index
+ */
+ static const char text_tmpl[] =
+ "COMP\n"
+ "PROPERTY CS_FIXED_BLOCK_WIDTH 1\n"
+ "PROPERTY CS_FIXED_BLOCK_HEIGHT 1\n"
+ "PROPERTY CS_FIXED_BLOCK_DEPTH 1\n"
+ "DCL BUFFER[0]\n"
+ "DCL BUFFER[1]\n"
+ "DCL BUFFER[2]\n"
+ "DCL CONST[0..1]\n"
+ "DCL TEMP[0..5]\n"
+ "IMM[0] UINT32 {0, 31, 2147483647, 4294967295}\n"
+ "IMM[1] UINT32 {1, 2, 4, 8}\n"
+ "IMM[2] UINT32 {16, 32, 64, 128}\n"
+ "IMM[3] UINT32 {1000000, 0, %u, 0}\n" /* for timestamp conversion */
+
+ "AND TEMP[5], CONST[0].wwww, IMM[2].xxxx\n"
+ "UIF TEMP[5]\n"
+ /* Check result availability. */
+ "LOAD TEMP[1].x, BUFFER[0], CONST[1].xxxx\n"
+ "ISHR TEMP[0].z, TEMP[1].xxxx, IMM[0].yyyy\n"
+ "MOV TEMP[1], TEMP[0].zzzz\n"
+ "NOT TEMP[0].z, TEMP[0].zzzz\n"
+
+ /* Load result if available. */
+ "UIF TEMP[1]\n"
+ "LOAD TEMP[0].xy, BUFFER[0], IMM[0].xxxx\n"
+ "ENDIF\n"
+ "ELSE\n"
+ /* Load previously accumulated result if requested. */
+ "MOV TEMP[0], IMM[0].xxxx\n"
+ "AND TEMP[4], CONST[0].wwww, IMM[1].xxxx\n"
+ "UIF TEMP[4]\n"
+ "LOAD TEMP[0].xyz, BUFFER[1], IMM[0].xxxx\n"
+ "ENDIF\n"
+
+ "MOV TEMP[1].x, IMM[0].xxxx\n"
+ "BGNLOOP\n"
+ /* Break if accumulated result so far is not available. */
+ "UIF TEMP[0].zzzz\n"
+ "BRK\n"
+ "ENDIF\n"
+
+ /* Break if result_index >= result_count. */
+ "USGE TEMP[5], TEMP[1].xxxx, CONST[0].zzzz\n"
+ "UIF TEMP[5]\n"
+ "BRK\n"
+ "ENDIF\n"
+
+ /* Load fence and check result availability */
+ "UMAD TEMP[5].x, TEMP[1].xxxx, CONST[0].yyyy, CONST[1].xxxx\n"
+ "LOAD TEMP[5].x, BUFFER[0], TEMP[5].xxxx\n"
+ "ISHR TEMP[0].z, TEMP[5].xxxx, IMM[0].yyyy\n"
+ "NOT TEMP[0].z, TEMP[0].zzzz\n"
+ "UIF TEMP[0].zzzz\n"
+ "BRK\n"
+ "ENDIF\n"
+
+ "MOV TEMP[1].y, IMM[0].xxxx\n"
+ "BGNLOOP\n"
+ /* Load start and end. */
+ "UMUL TEMP[5].x, TEMP[1].xxxx, CONST[0].yyyy\n"
+ "UMAD TEMP[5].x, TEMP[1].yyyy, CONST[1].yyyy, TEMP[5].xxxx\n"
+ "LOAD TEMP[2].xy, BUFFER[0], TEMP[5].xxxx\n"
+
+ "UADD TEMP[5].x, TEMP[5].xxxx, CONST[0].xxxx\n"
+ "LOAD TEMP[3].xy, BUFFER[0], TEMP[5].xxxx\n"
+
+ "U64ADD TEMP[3].xy, TEMP[3], -TEMP[2]\n"
+ "U64ADD TEMP[0].xy, TEMP[0], TEMP[3]\n"
+
+ /* Increment pair index */
+ "UADD TEMP[1].y, TEMP[1].yyyy, IMM[1].xxxx\n"
+ "USGE TEMP[5], TEMP[1].yyyy, CONST[1].zzzz\n"
+ "UIF TEMP[5]\n"
+ "BRK\n"
+ "ENDIF\n"
+ "ENDLOOP\n"
+
+ /* Increment result index */
+ "UADD TEMP[1].x, TEMP[1].xxxx, IMM[1].xxxx\n"
+ "ENDLOOP\n"
+ "ENDIF\n"
+
+ "AND TEMP[4], CONST[0].wwww, IMM[1].yyyy\n"
+ "UIF TEMP[4]\n"
+ /* Store accumulated data for chaining. */
+ "STORE BUFFER[2].xyz, IMM[0].xxxx, TEMP[0]\n"
+ "ELSE\n"
+ "AND TEMP[4], CONST[0].wwww, IMM[1].zzzz\n"
+ "UIF TEMP[4]\n"
+ /* Store result availability. */
+ "NOT TEMP[0].z, TEMP[0]\n"
+ "AND TEMP[0].z, TEMP[0].zzzz, IMM[1].xxxx\n"
+ "STORE BUFFER[2].x, IMM[0].xxxx, TEMP[0].zzzz\n"
+
+ "AND TEMP[4], CONST[0].wwww, IMM[2].zzzz\n"
+ "UIF TEMP[4]\n"
+ "STORE BUFFER[2].y, IMM[0].xxxx, IMM[0].xxxx\n"
+ "ENDIF\n"
+ "ELSE\n"
+ /* Store result if it is available. */
+ "NOT TEMP[4], TEMP[0].zzzz\n"
+ "UIF TEMP[4]\n"
+ /* Apply timestamp conversion */
+ "AND TEMP[4], CONST[0].wwww, IMM[2].yyyy\n"
+ "UIF TEMP[4]\n"
+ "U64MUL TEMP[0].xy, TEMP[0], IMM[3].xyxy\n"
+ "U64DIV TEMP[0].xy, TEMP[0], IMM[3].zwzw\n"
+ "ENDIF\n"
+
+ /* Convert to boolean */
+ "AND TEMP[4], CONST[0].wwww, IMM[1].wwww\n"
+ "UIF TEMP[4]\n"
+ "U64SNE TEMP[0].x, TEMP[0].xyxy, IMM[0].xxxx\n"
+ "AND TEMP[0].x, TEMP[0].xxxx, IMM[1].xxxx\n"
+ "MOV TEMP[0].y, IMM[0].xxxx\n"
+ "ENDIF\n"
+
+ "AND TEMP[4], CONST[0].wwww, IMM[2].zzzz\n"
+ "UIF TEMP[4]\n"
+ "STORE BUFFER[2].xy, IMM[0].xxxx, TEMP[0].xyxy\n"
+ "ELSE\n"
+ /* Clamping */
+ "UIF TEMP[0].yyyy\n"
+ "MOV TEMP[0].x, IMM[0].wwww\n"
+ "ENDIF\n"
+
+ "AND TEMP[4], CONST[0].wwww, IMM[2].wwww\n"
+ "UIF TEMP[4]\n"
+ "UMIN TEMP[0].x, TEMP[0].xxxx, IMM[0].zzzz\n"
+ "ENDIF\n"
+
+ "STORE BUFFER[2].x, IMM[0].xxxx, TEMP[0].xxxx\n"
+ "ENDIF\n"
+ "ENDIF\n"
+ "ENDIF\n"
+ "ENDIF\n"
+
+ "END\n";
+
+ char text[sizeof(text_tmpl) + 32];
+ struct tgsi_token tokens[1024];
+ struct pipe_compute_state state = {};
+
+ /* Hard code the frequency into the shader so that the backend can
+ * use the full range of optimizations for divide-by-constant.
+ */
+ snprintf(text, sizeof(text), text_tmpl,
+ rctx->screen->info.clock_crystal_freq);
+
+ if (!tgsi_text_translate(text, tokens, ARRAY_SIZE(tokens))) {
+ assert(false);
+ return;
}
- return TRUE;
+
+ state.ir_type = PIPE_SHADER_IR_TGSI;
+ state.prog = tokens;
+
+ rctx->query_result_shader = rctx->b.create_compute_state(&rctx->b, &state);
+}
+
+static void r600_restore_qbo_state(struct r600_common_context *rctx,
+ struct r600_qbo_state *st)
+{
+ rctx->b.bind_compute_state(&rctx->b, st->saved_compute);
+
+ rctx->b.set_constant_buffer(&rctx->b, PIPE_SHADER_COMPUTE, 0, &st->saved_const0);
+ pipe_resource_reference(&st->saved_const0.buffer, NULL);
+
+ rctx->b.set_shader_buffers(&rctx->b, PIPE_SHADER_COMPUTE, 0, 3, st->saved_ssbo);
+ for (unsigned i = 0; i < 3; ++i)
+ pipe_resource_reference(&st->saved_ssbo[i].buffer, NULL);
+}
+
+static void r600_query_hw_get_result_resource(struct r600_common_context *rctx,
+ struct r600_query *rquery,
+ bool wait,
+ enum pipe_query_value_type result_type,
+ int index,
+ struct pipe_resource *resource,
+ unsigned offset)
+{
+ struct r600_query_hw *query = (struct r600_query_hw *)rquery;
+ struct r600_query_buffer *qbuf;
+ struct r600_query_buffer *qbuf_prev;
+ struct pipe_resource *tmp_buffer = NULL;
+ unsigned tmp_buffer_offset = 0;
+ struct r600_qbo_state saved_state = {};
+ struct pipe_grid_info grid = {};
+ struct pipe_constant_buffer constant_buffer = {};
+ struct pipe_shader_buffer ssbo[3];
+ struct r600_hw_query_params params;
+ struct {
+ uint32_t end_offset;
+ uint32_t result_stride;
+ uint32_t result_count;
+ uint32_t config;
+ uint32_t fence_offset;
+ uint32_t pair_stride;
+ uint32_t pair_count;
+ } consts;
+
+ if (!rctx->query_result_shader) {
+ r600_create_query_result_shader(rctx);
+ if (!rctx->query_result_shader)
+ return;
+ }
+
+ if (query->buffer.previous) {
+ u_suballocator_alloc(rctx->allocator_zeroed_memory, 16, 16,
+ &tmp_buffer_offset, &tmp_buffer);
+ if (!tmp_buffer)
+ return;
+ }
+
+ rctx->save_qbo_state(&rctx->b, &saved_state);
+
+	r600_get_hw_query_params(rctx, query, index >= 0 ? index : 0, &params);
+ consts.end_offset = params.end_offset - params.start_offset;
+ consts.fence_offset = params.fence_offset - params.start_offset;
+ consts.result_stride = query->result_size;
+ consts.pair_stride = params.pair_stride;
+ consts.pair_count = params.pair_count;
+
+ constant_buffer.buffer_size = sizeof(consts);
+ constant_buffer.user_buffer = &consts;
+
+ ssbo[1].buffer = tmp_buffer;
+ ssbo[1].buffer_offset = tmp_buffer_offset;
+ ssbo[1].buffer_size = 16;
+
+ ssbo[2] = ssbo[1];
+
+ rctx->b.bind_compute_state(&rctx->b, rctx->query_result_shader);
+
+ grid.block[0] = 1;
+ grid.block[1] = 1;
+ grid.block[2] = 1;
+ grid.grid[0] = 1;
+ grid.grid[1] = 1;
+ grid.grid[2] = 1;
+
+ consts.config = 0;
+ if (index < 0)
+ consts.config |= 4;
+ if (query->b.type == PIPE_QUERY_OCCLUSION_PREDICATE ||
+ query->b.type == PIPE_QUERY_SO_OVERFLOW_PREDICATE)
+ consts.config |= 8;
+ else if (query->b.type == PIPE_QUERY_TIMESTAMP ||
+ query->b.type == PIPE_QUERY_TIME_ELAPSED)
+ consts.config |= 32;
+
+ switch (result_type) {
+ case PIPE_QUERY_TYPE_U64:
+ case PIPE_QUERY_TYPE_I64:
+ consts.config |= 64;
+ break;
+ case PIPE_QUERY_TYPE_I32:
+ consts.config |= 128;
+ break;
+ case PIPE_QUERY_TYPE_U32:
+ break;
+ }
+
+ rctx->flags |= rctx->screen->barrier_flags.cp_to_L2;
+
+ for (qbuf = &query->buffer; qbuf; qbuf = qbuf_prev) {
+ if (query->b.type != PIPE_QUERY_TIMESTAMP) {
+ qbuf_prev = qbuf->previous;
+ consts.result_count = qbuf->results_end / query->result_size;
+ consts.config &= ~3;
+ if (qbuf != &query->buffer)
+ consts.config |= 1;
+ if (qbuf->previous)
+ consts.config |= 2;
+ } else {
+ /* Only read the last timestamp. */
+ qbuf_prev = NULL;
+ consts.result_count = 0;
+ consts.config |= 16;
+ params.start_offset += qbuf->results_end - query->result_size;
+ }
+
+ rctx->b.set_constant_buffer(&rctx->b, PIPE_SHADER_COMPUTE, 0, &constant_buffer);
+
+ ssbo[0].buffer = &qbuf->buf->b.b;
+ ssbo[0].buffer_offset = params.start_offset;
+ ssbo[0].buffer_size = qbuf->results_end - params.start_offset;
+
+ if (!qbuf->previous) {
+ ssbo[2].buffer = resource;
+ ssbo[2].buffer_offset = offset;
+ ssbo[2].buffer_size = 8;
+
+ ((struct r600_resource *)resource)->TC_L2_dirty = true;
+ }
+
+ rctx->b.set_shader_buffers(&rctx->b, PIPE_SHADER_COMPUTE, 0, 3, ssbo);
+
+ if (wait && qbuf == &query->buffer) {
+ uint64_t va;
+
+ /* Wait for result availability. Wait only for readiness
+ * of the last entry, since the fence writes should be
+ * serialized in the CP.
+ */
+ va = qbuf->buf->gpu_address + qbuf->results_end - query->result_size;
+ va += params.fence_offset;
+
+ r600_gfx_wait_fence(rctx, va, 0x80000000, 0x80000000);
+ }
+
+ rctx->b.launch_grid(&rctx->b, &grid);
+ rctx->flags |= rctx->screen->barrier_flags.compute_to_L2;
+ }
+
+ r600_restore_qbo_state(rctx, &saved_state);
+ pipe_resource_reference(&tmp_buffer, NULL);
}
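+
+/* Note on chaining: every grid after the first reads the running total
+ * from BUFFER[1] (config bit 1), every grid except the last writes it back
+ * through BUFFER[2] (config bit 2), and only the last invocation, which
+ * processes the oldest query buffer, stores into the user resource. */
+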
static void r600_render_condition(struct pipe_context *ctx,
uint mode)
{
struct r600_common_context *rctx = (struct r600_common_context *)ctx;
- struct r600_query *rquery = (struct r600_query*)query;
+ struct r600_query_hw *rquery = (struct r600_query_hw *)query;
struct r600_query_buffer *qbuf;
struct r600_atom *atom = &rctx->render_cond_atom;
/* Compute the size of SET_PREDICATION packets. */
atom->num_dw = 0;
- for (qbuf = &rquery->buffer; qbuf; qbuf = qbuf->previous)
- atom->num_dw += (qbuf->results_end / rquery->result_size) * 5;
+ if (query) {
+ for (qbuf = &rquery->buffer; qbuf; qbuf = qbuf->previous)
+ atom->num_dw += (qbuf->results_end / rquery->result_size) * 5;
+ }
rctx->set_atom_dirty(rctx, atom, query != NULL);
}
-static void r600_suspend_queries(struct r600_common_context *ctx,
- struct list_head *query_list,
- unsigned *num_cs_dw_queries_suspend)
+void r600_suspend_queries(struct r600_common_context *ctx)
{
- struct r600_query *query;
+ struct r600_query_hw *query;
- LIST_FOR_EACH_ENTRY(query, query_list, list) {
- r600_emit_query_end(ctx, query);
+ LIST_FOR_EACH_ENTRY(query, &ctx->active_queries, list) {
+ r600_query_hw_emit_stop(ctx, query);
}
- assert(*num_cs_dw_queries_suspend == 0);
-}
-
-void r600_suspend_nontimer_queries(struct r600_common_context *ctx)
-{
- r600_suspend_queries(ctx, &ctx->active_nontimer_queries,
- &ctx->num_cs_dw_nontimer_queries_suspend);
-}
-
-void r600_suspend_timer_queries(struct r600_common_context *ctx)
-{
- r600_suspend_queries(ctx, &ctx->active_timer_queries,
- &ctx->num_cs_dw_timer_queries_suspend);
+ assert(ctx->num_cs_dw_queries_suspend == 0);
}
static unsigned r600_queries_num_cs_dw_for_resuming(struct r600_common_context *ctx,
struct list_head *query_list)
{
- struct r600_query *query;
+ struct r600_query_hw *query;
unsigned num_dw = 0;
LIST_FOR_EACH_ENTRY(query, query_list, list) {
/* begin + end */
- num_dw += query->num_cs_dw * 2;
+ num_dw += query->num_cs_dw_begin + query->num_cs_dw_end;
/* Workaround for the fact that
-	 * num_cs_dw_nontimer_queries_suspend is incremented for every
+	 * num_cs_dw_queries_suspend is incremented for every
* resumed query, which raises the bar in need_cs_space for
* queries about to be resumed.
*/
- num_dw += query->num_cs_dw;
+ num_dw += query->num_cs_dw_end;
}
/* primitives generated query */
num_dw += ctx->streamout.enable_atom.num_dw;
return num_dw;
}
-static void r600_resume_queries(struct r600_common_context *ctx,
- struct list_head *query_list,
- unsigned *num_cs_dw_queries_suspend)
+void r600_resume_queries(struct r600_common_context *ctx)
{
- struct r600_query *query;
- unsigned num_cs_dw = r600_queries_num_cs_dw_for_resuming(ctx, query_list);
+ struct r600_query_hw *query;
+ unsigned num_cs_dw = r600_queries_num_cs_dw_for_resuming(ctx, &ctx->active_queries);
- assert(*num_cs_dw_queries_suspend == 0);
+ assert(ctx->num_cs_dw_queries_suspend == 0);
/* Check CS space here. Resuming must not be interrupted by flushes. */
- ctx->need_gfx_cs_space(&ctx->b, num_cs_dw, TRUE);
+ ctx->need_gfx_cs_space(&ctx->b, num_cs_dw, true);
- LIST_FOR_EACH_ENTRY(query, query_list, list) {
- r600_emit_query_begin(ctx, query);
+ LIST_FOR_EACH_ENTRY(query, &ctx->active_queries, list) {
+ r600_query_hw_emit_start(ctx, query);
}
}
-void r600_resume_nontimer_queries(struct r600_common_context *ctx)
-{
- r600_resume_queries(ctx, &ctx->active_nontimer_queries,
- &ctx->num_cs_dw_nontimer_queries_suspend);
-}
-
-void r600_resume_timer_queries(struct r600_common_context *ctx)
-{
- r600_resume_queries(ctx, &ctx->active_timer_queries,
- &ctx->num_cs_dw_timer_queries_suspend);
-}
-
/* Get backends mask */
void r600_query_init_backend_mask(struct r600_common_context *ctx)
{
struct radeon_winsys_cs *cs = ctx->gfx.cs;
struct r600_resource *buffer;
uint32_t *results;
- unsigned num_backends = ctx->screen->info.r600_num_backends;
+ unsigned num_backends = ctx->screen->info.num_render_backends;
unsigned i, mask = 0;
/* if backend_map query is supported by the kernel */
- if (ctx->screen->info.r600_backend_map_valid) {
- unsigned num_tile_pipes = ctx->screen->info.r600_num_tile_pipes;
- unsigned backend_map = ctx->screen->info.r600_backend_map;
+ if (ctx->screen->info.r600_gb_backend_map_valid) {
+ unsigned num_tile_pipes = ctx->screen->info.num_tile_pipes;
+ unsigned backend_map = ctx->screen->info.r600_gb_backend_map;
unsigned item_width, item_mask;
if (ctx->chip_class >= EVERGREEN) {
item_mask = 0x3;
}
- while(num_tile_pipes--) {
+ while (num_tile_pipes--) {
i = backend_map & item_mask;
mask |= (1<<i);
backend_map >>= item_width;
}
}
- pipe_resource_reference((struct pipe_resource**)&buffer, NULL);
+ r600_resource_reference(&buffer, NULL);
if (mask != 0) {
ctx->backend_mask = mask;
return;
}
-#define X(name_, query_type_, type_, result_type_) \
+#define XFULL(name_, query_type_, type_, result_type_, group_id_) \
{ \
.name = name_, \
.query_type = R600_QUERY_##query_type_, \
.type = PIPE_DRIVER_QUERY_TYPE_##type_, \
.result_type = PIPE_DRIVER_QUERY_RESULT_TYPE_##result_type_, \
- .group_id = ~(unsigned)0 \
+ .group_id = group_id_ \
}
+#define X(name_, query_type_, type_, result_type_) \
+ XFULL(name_, query_type_, type_, result_type_, ~(unsigned)0)
+
+#define XG(group_, name_, query_type_, type_, result_type_) \
+ XFULL(name_, query_type_, type_, result_type_, R600_QUERY_GROUP_##group_)
+
static struct pipe_driver_query_info r600_driver_query_list[] = {
X("num-compilations", NUM_COMPILATIONS, UINT64, CUMULATIVE),
X("num-shaders-created", NUM_SHADERS_CREATED, UINT64, CUMULATIVE),
- X("draw-calls", DRAW_CALLS, UINT64, CUMULATIVE),
+ X("draw-calls", DRAW_CALLS, UINT64, AVERAGE),
+ X("spill-draw-calls", SPILL_DRAW_CALLS, UINT64, AVERAGE),
+ X("compute-calls", COMPUTE_CALLS, UINT64, AVERAGE),
+ X("spill-compute-calls", SPILL_COMPUTE_CALLS, UINT64, AVERAGE),
+ X("dma-calls", DMA_CALLS, UINT64, AVERAGE),
+ X("num-vs-flushes", NUM_VS_FLUSHES, UINT64, AVERAGE),
+ X("num-ps-flushes", NUM_PS_FLUSHES, UINT64, AVERAGE),
+ X("num-cs-flushes", NUM_CS_FLUSHES, UINT64, AVERAGE),
X("requested-VRAM", REQUESTED_VRAM, BYTES, AVERAGE),
X("requested-GTT", REQUESTED_GTT, BYTES, AVERAGE),
+ X("mapped-VRAM", MAPPED_VRAM, BYTES, AVERAGE),
+ X("mapped-GTT", MAPPED_GTT, BYTES, AVERAGE),
X("buffer-wait-time", BUFFER_WAIT_TIME, MICROSECONDS, CUMULATIVE),
- X("num-cs-flushes", NUM_CS_FLUSHES, UINT64, CUMULATIVE),
+ X("num-ctx-flushes", NUM_CTX_FLUSHES, UINT64, AVERAGE),
X("num-bytes-moved", NUM_BYTES_MOVED, BYTES, CUMULATIVE),
+ X("num-evictions", NUM_EVICTIONS, UINT64, CUMULATIVE),
X("VRAM-usage", VRAM_USAGE, BYTES, AVERAGE),
X("GTT-usage", GTT_USAGE, BYTES, AVERAGE),
+ X("back-buffer-ps-draw-ratio", BACK_BUFFER_PS_DRAW_RATIO, UINT64, AVERAGE),
+
+ /* GPIN queries are for the benefit of old versions of GPUPerfStudio,
+ * which use them as a fallback path to detect the GPU type.
+ *
+ * Note: The names of these queries are significant for GPUPerfStudio
+ * (and possibly their order as well). */
+ XG(GPIN, "GPIN_000", GPIN_ASIC_ID, UINT, AVERAGE),
+ XG(GPIN, "GPIN_001", GPIN_NUM_SIMD, UINT, AVERAGE),
+ XG(GPIN, "GPIN_002", GPIN_NUM_RB, UINT, AVERAGE),
+ XG(GPIN, "GPIN_003", GPIN_NUM_SPI, UINT, AVERAGE),
+ XG(GPIN, "GPIN_004", GPIN_NUM_SE, UINT, AVERAGE),
+
+ /* The following queries must be at the end of the list because their
+ * availability is adjusted dynamically based on the DRM version. */
X("GPU-load", GPU_LOAD, UINT64, AVERAGE),
X("temperature", GPU_TEMPERATURE, UINT64, AVERAGE),
X("shader-clock", CURRENT_GPU_SCLK, HZ, AVERAGE),
};
#undef X
+#undef XG
+#undef XFULL
static unsigned r600_get_num_queries(struct r600_common_screen *rscreen)
{
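+	/* The last entries of r600_driver_query_list depend on kernel
+	 * support (GPU load, temperature and clock queries); trim the ones
+	 * the running DRM version cannot report. */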
if (rscreen->info.drm_major == 2 && rscreen->info.drm_minor >= 42)
- return Elements(r600_driver_query_list);
+ return ARRAY_SIZE(r600_driver_query_list);
else if (rscreen->info.drm_major == 3)
- return Elements(r600_driver_query_list) - 3;
+ return ARRAY_SIZE(r600_driver_query_list) - 3;
else
- return Elements(r600_driver_query_list) - 4;
+ return ARRAY_SIZE(r600_driver_query_list) - 4;
}
static int r600_get_driver_query_info(struct pipe_screen *screen,
struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;
unsigned num_queries = r600_get_num_queries(rscreen);
- if (!info)
- return num_queries;
+ if (!info) {
+ unsigned num_perfcounters =
+ r600_get_perfcounter_info(rscreen, 0, NULL);
+
+ return num_queries + num_perfcounters;
+ }
if (index >= num_queries)
- return 0;
+ return r600_get_perfcounter_info(rscreen, index - num_queries, info);
*info = r600_driver_query_list[index];
switch (info->query_type) {
case R600_QUERY_REQUESTED_VRAM:
case R600_QUERY_VRAM_USAGE:
+ case R600_QUERY_MAPPED_VRAM:
info->max_value.u64 = rscreen->info.vram_size;
break;
case R600_QUERY_REQUESTED_GTT:
case R600_QUERY_GTT_USAGE:
+ case R600_QUERY_MAPPED_GTT:
info->max_value.u64 = rscreen->info.gart_size;
break;
case R600_QUERY_GPU_TEMPERATURE:
break;
}
+ if (info->group_id != ~(unsigned)0 && rscreen->perfcounters)
+ info->group_id += rscreen->perfcounters->num_groups;
+
+ return 1;
+}
+
+/* Note: Unfortunately, GPUPerfStudio hardcodes the order of hardware
+ * performance counter groups, so be careful when changing this and related
+ * functions.
+ */
+static int r600_get_driver_query_group_info(struct pipe_screen *screen,
+ unsigned index,
+ struct pipe_driver_query_group_info *info)
+{
+ struct r600_common_screen *rscreen = (struct r600_common_screen *)screen;
+ unsigned num_pc_groups = 0;
+
+ if (rscreen->perfcounters)
+ num_pc_groups = rscreen->perfcounters->num_groups;
+
+ if (!info)
+ return num_pc_groups + R600_NUM_SW_QUERY_GROUPS;
+
+ if (index < num_pc_groups)
+ return r600_get_perfcounter_group_info(rscreen, index, info);
+
+ index -= num_pc_groups;
+ if (index >= R600_NUM_SW_QUERY_GROUPS)
+ return 0;
+
+ info->name = "GPIN";
+ info->max_active_queries = 5;
+ info->num_queries = 5;
return 1;
}
void r600_query_init(struct r600_common_context *rctx)
{
rctx->b.create_query = r600_create_query;
+ rctx->b.create_batch_query = r600_create_batch_query;
rctx->b.destroy_query = r600_destroy_query;
rctx->b.begin_query = r600_begin_query;
rctx->b.end_query = r600_end_query;
rctx->b.get_query_result = r600_get_query_result;
+ rctx->b.get_query_result_resource = r600_get_query_result_resource;
rctx->render_cond_atom.emit = r600_emit_query_predication;
- if (((struct r600_common_screen*)rctx->b.screen)->info.r600_num_backends > 0)
+ if (((struct r600_common_screen*)rctx->b.screen)->info.num_render_backends > 0)
rctx->b.render_condition = r600_render_condition;
- LIST_INITHEAD(&rctx->active_nontimer_queries);
- LIST_INITHEAD(&rctx->active_timer_queries);
+ LIST_INITHEAD(&rctx->active_queries);
}
void r600_init_screen_query_functions(struct r600_common_screen *rscreen)
{
rscreen->b.get_driver_query_info = r600_get_driver_query_info;
+ rscreen->b.get_driver_query_group_info = r600_get_driver_query_group_info;
}