static struct r600_resource *r600_new_query_buffer(struct r600_common_context *ctx,
struct r600_query_hw *query)
{
- unsigned buf_size = 4096;
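+ /* Make sure the buffer is at least as large as one complete query result. */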
+ unsigned buf_size = MAX2(query->result_size, 4096);
/* Queries are normally read by the CPU after
* being written by the gpu, hence staging is probably a good
* usage pattern.
*/
struct r600_resource *buf = (struct r600_resource*)
pipe_buffer_create(ctx->b.screen, PIPE_BIND_CUSTOM,
PIPE_USAGE_STAGING, buf_size);
- if (query->ops->prepare_buffer)
+ if (query->flags & R600_QUERY_HW_FLAG_PREDICATE)
query->ops->prepare_buffer(ctx, query, buf);
return buf;
}
static void r600_query_hw_prepare_buffer(struct r600_common_context *ctx,
struct r600_query_hw *query,
struct r600_resource *buffer)
- {
- uint32_t *results;
-
- if (query->b.type == PIPE_QUERY_TIME_ELAPSED ||
- query->b.type == PIPE_QUERY_TIMESTAMP)
- return;
-
- results = r600_buffer_map_sync_with_rings(ctx, buffer,
- PIPE_TRANSFER_WRITE);
+{
+ /* Callers ensure that the buffer is currently unused by the GPU. */
+ uint32_t *results = ctx->ws->buffer_map(buffer->cs_buf, NULL,
+ PIPE_TRANSFER_WRITE |
+ PIPE_TRANSFER_UNSYNCHRONIZED);
memset(results, 0, buffer->b.b.width0);
case PIPE_QUERY_OCCLUSION_COUNTER:
case PIPE_QUERY_OCCLUSION_PREDICATE:
query->result_size = 16 * rctx->max_db;
- query->num_cs_dw = 6;
+ query->num_cs_dw_begin = 6;
+ query->num_cs_dw_end = 6;
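+ /* PREDICATE queries need their results buffer initialized by prepare_buffer() before use. */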
+ query->flags |= R600_QUERY_HW_FLAG_PREDICATE;
break;
case PIPE_QUERY_TIME_ELAPSED:
query->result_size = 16;
- query->num_cs_dw = 8;
+ query->num_cs_dw_begin = 8;
+ query->num_cs_dw_end = 8;
query->flags = R600_QUERY_HW_FLAG_TIMER;
break;
case PIPE_QUERY_TIMESTAMP:
query->result_size = 8;
- query->num_cs_dw = 8;
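+ /* Timestamps are only written at end_query; there is no begin emission. */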
+ query->num_cs_dw_end = 8;
query->flags = R600_QUERY_HW_FLAG_TIMER |
R600_QUERY_HW_FLAG_NO_START;
break;
case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
/* NumPrimitivesWritten, PrimitiveStorageNeeded. */
query->result_size = 32;
- query->num_cs_dw = 6;
+ query->num_cs_dw_begin = 6;
+ query->num_cs_dw_end = 6;
query->stream = index;
+ query->flags |= R600_QUERY_HW_FLAG_PREDICATE;
break;
case PIPE_QUERY_PIPELINE_STATISTICS:
/* 11 values on EG, 8 on R600. */
query->result_size = (rctx->chip_class >= EVERGREEN ? 11 : 8) * 16;
- query->num_cs_dw = 6;
+ query->num_cs_dw_begin = 6;
+ query->num_cs_dw_end = 6;
break;
default:
assert(0);
r600_update_occlusion_query_state(ctx, query->b.type, 1);
r600_update_prims_generated_query_state(ctx, query->b.type, 1);
- ctx->need_gfx_cs_space(&ctx->b, query->num_cs_dw * 2, TRUE);
+
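+ /* Reserve enough CS space for both the begin and the end emission. */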
+ ctx->need_gfx_cs_space(&ctx->b, query->num_cs_dw_begin + query->num_cs_dw_end,
+ TRUE);
/* Get a new query buffer if needed. */
if (query->buffer.results_end + query->result_size > query->buffer.buf->b.b.width0) {
query->ops->emit_start(ctx, query, query->buffer.buf, va);
if (query->flags & R600_QUERY_HW_FLAG_TIMER)
- ctx->num_cs_dw_timer_queries_suspend += query->num_cs_dw;
+ ctx->num_cs_dw_timer_queries_suspend += query->num_cs_dw_end;
else
- ctx->num_cs_dw_nontimer_queries_suspend += query->num_cs_dw;
-
+ ctx->num_cs_dw_nontimer_queries_suspend += query->num_cs_dw_end;
}
static void r600_query_hw_emit_stop(struct r600_common_context *ctx,
/* The queries which need begin already called this in begin_query. */
if (query->flags & R600_QUERY_HW_FLAG_NO_START) {
- ctx->need_gfx_cs_space(&ctx->b, query->num_cs_dw, FALSE);
+ ctx->need_gfx_cs_space(&ctx->b, query->num_cs_dw_end, FALSE);
}
/* emit end query */
if (!(query->flags & R600_QUERY_HW_FLAG_NO_START)) {
if (query->flags & R600_QUERY_HW_FLAG_TIMER)
- ctx->num_cs_dw_timer_queries_suspend -= query->num_cs_dw;
+ ctx->num_cs_dw_timer_queries_suspend -= query->num_cs_dw_end;
else
- ctx->num_cs_dw_nontimer_queries_suspend -= query->num_cs_dw;
+ ctx->num_cs_dw_nontimer_queries_suspend -= query->num_cs_dw_end;
}
r600_update_occlusion_query_state(ctx, query->b.type, -1);
return rquery->ops->begin(rctx, rquery);
}
-boolean r600_query_hw_begin(struct r600_common_context *rctx,
- struct r600_query *rquery)
+static void r600_query_hw_reset_buffers(struct r600_common_context *rctx,
+ struct r600_query_hw *query)
{
- struct r600_query_hw *query = (struct r600_query_hw *)rquery;
struct r600_query_buffer *prev = query->buffer.previous;
- if (query->flags & R600_QUERY_HW_FLAG_NO_START) {
- assert(0);
- return false;
- }
-
/* Discard the old query buffers. */
while (prev) {
struct r600_query_buffer *qbuf = prev;
prev = prev->previous;
pipe_resource_reference((struct pipe_resource**)&qbuf->buf, NULL);
FREE(qbuf);
}
- /* Obtain a new buffer if the current one can't be mapped without a stall. */
- if (r600_rings_is_buffer_referenced(rctx, query->buffer.buf->cs_buf, RADEON_USAGE_READWRITE) ||
- !rctx->ws->buffer_wait(query->buffer.buf->buf, 0, RADEON_USAGE_READWRITE)) {
- pipe_resource_reference((struct pipe_resource**)&query->buffer.buf, NULL);
- query->buffer.buf = r600_new_query_buffer(rctx, query);
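+ /* Only PREDICATE queries need their buffer contents (re)initialized. */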
+ if (query->flags & R600_QUERY_HW_FLAG_PREDICATE) {
+ /* Obtain a new buffer if the current one can't be mapped without a stall. */
+ if (r600_rings_is_buffer_referenced(rctx, query->buffer.buf->cs_buf, RADEON_USAGE_READWRITE) ||
+ !rctx->ws->buffer_wait(query->buffer.buf->buf, 0, RADEON_USAGE_READWRITE)) {
+ pipe_resource_reference((struct pipe_resource**)&query->buffer.buf, NULL);
+ query->buffer.buf = r600_new_query_buffer(rctx, query);
+ } else {
+ query->ops->prepare_buffer(rctx, query, query->buffer.buf);
+ }
}
query->buffer.results_end = 0;
query->buffer.previous = NULL;
+}
+
+boolean r600_query_hw_begin(struct r600_common_context *rctx,
+ struct r600_query *rquery)
+{
+ struct r600_query_hw *query = (struct r600_query_hw *)rquery;
+
+ if (query->flags & R600_QUERY_HW_FLAG_NO_START) {
+ assert(0);
+ return false;
+ }
+
+ r600_query_hw_reset_buffers(rctx, query);
r600_query_hw_emit_start(rctx, query);
{
struct r600_query_hw *query = (struct r600_query_hw *)rquery;
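+ /* NO_START queries never go through begin_query, so reset their buffers here. */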
+ if (query->flags & R600_QUERY_HW_FLAG_NO_START)
+ r600_query_hw_reset_buffers(rctx, query);
+
r600_query_hw_emit_stop(rctx, query);
if (!(query->flags & R600_QUERY_HW_FLAG_NO_START))
LIST_FOR_EACH_ENTRY(query, query_list, list) {
/* begin + end */
- num_dw += query->num_cs_dw * 2;
+ num_dw += query->num_cs_dw_begin + query->num_cs_dw_end;
/* Workaround for the fact that
* num_cs_dw_nontimer_queries_suspend is incremented for every
* resumed query, which raises the bar in need_cs_space for
* queries about to be resumed.
*/
- num_dw += query->num_cs_dw;
+ num_dw += query->num_cs_dw_end;
}
/* primitives generated query */
num_dw += ctx->streamout.enable_atom.num_dw;
struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;
unsigned num_queries = r600_get_num_queries(rscreen);
- if (!info)
- return num_queries;
+ if (!info) {
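+ /* Calling r600_get_perfcounter_info() with a NULL info yields the number of perfcounter queries. */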
+ unsigned num_perfcounters =
+ r600_get_perfcounter_info(rscreen, 0, NULL);
+
+ return num_queries + num_perfcounters;
+ }
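+ /* Indices beyond the built-in query list refer to perfcounter queries. */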
if (index >= num_queries)
- return 0;
+ return r600_get_perfcounter_info(rscreen, index - num_queries, info);
*info = r600_driver_query_list[index];
return 1;
}
+static int r600_get_driver_query_group_info(struct pipe_screen *screen,
+ unsigned index,
+ struct pipe_driver_query_group_info *info)
+{
+ struct r600_common_screen *rscreen = (struct r600_common_screen *)screen;
+
+ return r600_get_perfcounter_group_info(rscreen, index, info);
+}
+
void r600_query_init(struct r600_common_context *rctx)
{
rctx->b.create_query = r600_create_query;
+ rctx->b.create_batch_query = r600_create_batch_query;
rctx->b.destroy_query = r600_destroy_query;
rctx->b.begin_query = r600_begin_query;
rctx->b.end_query = r600_end_query;
void r600_init_screen_query_functions(struct r600_common_screen *rscreen)
{
rscreen->b.get_driver_query_info = r600_get_driver_query_info;
+ rscreen->b.get_driver_query_group_info = r600_get_driver_query_group_info;
}