static struct r600_resource *r600_new_query_buffer(struct r600_common_context *ctx,
struct r600_query_hw *query)
{
- unsigned buf_size = 4096;
+ unsigned buf_size = MAX2(query->result_size, 4096);
/* Queries are normally read by the CPU after
* being written by the GPU, hence staging is probably a good
* usage pattern. */
pipe_buffer_create(ctx->b.screen, PIPE_BIND_CUSTOM,
PIPE_USAGE_STAGING, buf_size);
- if (query->ops->prepare_buffer)
+ if (query->flags & R600_QUERY_HW_FLAG_PREDICATE)
query->ops->prepare_buffer(ctx, query, buf);
return buf;
static void r600_query_hw_prepare_buffer(struct r600_common_context *ctx,
struct r600_query_hw *query,
struct r600_resource *buffer)
- {
- uint32_t *results;
-
- if (query->b.type == PIPE_QUERY_TIME_ELAPSED ||
- query->b.type == PIPE_QUERY_TIMESTAMP)
- return;
-
- results = r600_buffer_map_sync_with_rings(ctx, buffer,
- PIPE_TRANSFER_WRITE);
+{
+ /* Callers ensure that the buffer is currently unused by the GPU. */
+ uint32_t *results = ctx->ws->buffer_map(buffer->cs_buf, NULL,
+ PIPE_TRANSFER_WRITE |
+ PIPE_TRANSFER_UNSYNCHRONIZED);
memset(results, 0, buffer->b.b.width0);
query->result_size = 16 * rctx->max_db;
query->num_cs_dw_begin = 6;
query->num_cs_dw_end = 6;
+ query->flags |= R600_QUERY_HW_FLAG_PREDICATE;
break;
case PIPE_QUERY_TIME_ELAPSED:
query->result_size = 16;
query->num_cs_dw_begin = 6;
query->num_cs_dw_end = 6;
query->stream = index;
+ query->flags |= R600_QUERY_HW_FLAG_PREDICATE;
break;
case PIPE_QUERY_PIPELINE_STATISTICS:
/* 11 values on EG, 8 on R600. */
FREE(qbuf);
}
- /* Obtain a new buffer if the current one can't be mapped without a stall. */
- if (r600_rings_is_buffer_referenced(rctx, query->buffer.buf->cs_buf, RADEON_USAGE_READWRITE) ||
- !rctx->ws->buffer_wait(query->buffer.buf->buf, 0, RADEON_USAGE_READWRITE)) {
- pipe_resource_reference((struct pipe_resource**)&query->buffer.buf, NULL);
- query->buffer.buf = r600_new_query_buffer(rctx, query);
+ if (query->flags & R600_QUERY_HW_FLAG_PREDICATE) {
+ /* Obtain a new buffer if the current one can't be mapped without a stall. */
+ if (r600_rings_is_buffer_referenced(rctx, query->buffer.buf->cs_buf, RADEON_USAGE_READWRITE) ||
+ !rctx->ws->buffer_wait(query->buffer.buf->buf, 0, RADEON_USAGE_READWRITE)) {
+ pipe_resource_reference((struct pipe_resource**)&query->buffer.buf, NULL);
+ query->buffer.buf = r600_new_query_buffer(rctx, query);
+ } else {
+ query->ops->prepare_buffer(rctx, query, query->buffer.buf);
+ }
}
query->buffer.results_end = 0;
struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;
unsigned num_queries = r600_get_num_queries(rscreen);
- if (!info)
- return num_queries;
+ if (!info) {
+ unsigned num_perfcounters =
+ r600_get_perfcounter_info(rscreen, 0, NULL);
+
+ return num_queries + num_perfcounters;
+ }
if (index >= num_queries)
- return 0;
+ return r600_get_perfcounter_info(rscreen, index - num_queries, info);
*info = r600_driver_query_list[index];
return 1;
}
+/* pipe_screen::get_driver_query_group_info hook: forward straight to the
+ * perfcounter code — every query group this driver exposes is a
+ * performance-counter group. Returns the number of groups when info is
+ * NULL (presumably, mirroring get_driver_query_info — confirm against
+ * r600_get_perfcounter_group_info). */
+static int r600_get_driver_query_group_info(struct pipe_screen *screen,
+ unsigned index,
+ struct pipe_driver_query_group_info *info)
+{
+ struct r600_common_screen *rscreen = (struct r600_common_screen *)screen;
+
+ return r600_get_perfcounter_group_info(rscreen, index, info);
+}
+
void r600_query_init(struct r600_common_context *rctx)
{
rctx->b.create_query = r600_create_query;
+ rctx->b.create_batch_query = r600_create_batch_query;
rctx->b.destroy_query = r600_destroy_query;
rctx->b.begin_query = r600_begin_query;
rctx->b.end_query = r600_end_query;
void r600_init_screen_query_functions(struct r600_common_screen *rscreen)
{
rscreen->b.get_driver_query_info = r600_get_driver_query_info;
+ /* New hook: expose perfcounter groups to state trackers. */
+ rscreen->b.get_driver_query_group_info = r600_get_driver_query_group_info;
}